Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm pull, it has a shared branch with some alsa
  crossover but everything should be acked by relevant people.

  New drivers:
     - ATMEL HLCDC driver
     - designware HDMI core support (used in multiple SoCs).

  core:
     - lots more atomic modesetting work, properties and atomic ioctl
       (hidden under option)
     - bridge rework finally allows Samsung Exynos Chromebooks to work
     - some more panels supported

  i915:
     - atomic plane update support
     - DSI uses shared DSI infrastructure
     - Skylake basic support is all merged now
     - component framework used for i915/snd-hda interactions
     - write-combine cpu memory mappings
     - engine init code refactored
     - full ppgtt enabled where execlists are enabled.
     - cherryview rps/gpu turbo and pipe CRC support.

  radeon:
     - indirect draw support for evergreen/cayman
     - SMC and manual fan control for SI/CI
     - Displayport audio support

  amdkfd:
     - SDMA usermode queue support
     - replace suballocator usage with a more suitable one
     - rework to allow interfacing to more than just radeon

  nouveau:
     - major renaming in prep for later splitting work
     - merge arm platform driver into nouveau
     - GK20A reclocking support

  msm:
     - conversion to atomic modesetting
     - YUV support for mdp4/5
     - eDP support
     - hw cursor for mdp5

  tegra:
     - conversion to atomic modesetting
     - better suspend/resume support for child devices

  rcar-du:
     - interlaced support

  imx:
     - move to using dw_hdmi shared support
     - mode_fixup support

  sti:
     - DVO support
     - HDMI infoframe support

  exynos:
     - refactoring and cleanup, removing lots of unnecessary internal
       abstraction
     - exynos7 DECON display controller support

  Along with the usual bunch of fixes, cleanups etc"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (724 commits)
  drm/radeon: fix voltage setup on hawaii
  drm/radeon/dp: Set EDP_CONFIGURATION_SET for bridge chips if necessary
  drm/radeon: only enable kv/kb dpm interrupts once v3
  drm/radeon: workaround for CP HW bug on CIK
  drm/radeon: Don't try to enable write-combining without PAT
  drm/radeon: use 0-255 rather than 0-100 for pwm fan range
  drm/i915: Clamp efficient frequency to valid range
  drm/i915: Really ignore long HPD pulses on eDP
  drm/exynos: Add DECON driver
  drm/i915: Correct the base value while updating LP_OUTPUT_HOLD in MIPI_PORT_CTRL
  drm/i915: Insert a command barrier on BLT/BSD cache flushes
  drm/i915: Drop vblank wait from intel_dp_link_down
  drm/exynos: fix NULL pointer reference
  drm/exynos: remove exynos_plane_dpms
  drm/exynos: remove mode property of exynos crtc
  drm/exynos: Remove exynos_plane_dpms() call with no effect
  drm/i915: Squelch overzealous uncore reset WARN_ON
  drm/i915: Take runtime pm reference on hangcheck_info
  drm/i915: Correct the IOSF Dev_FN field for IOSF transfers
  drm/exynos: fix DMA_ATTR_NO_KERNEL_MAPPING usage
  ...
Linus Torvalds 2015-02-16 15:48:00 -08:00
commit 796e1c5571
1138 changed files with 50811 additions and 34709 deletions


@ -239,6 +239,14 @@
Driver supports dedicated render nodes.
</para></listitem>
</varlistentry>
<varlistentry>
<term>DRIVER_ATOMIC</term>
<listitem><para>
Driver supports atomic properties. In this case the driver
must implement appropriate obj->atomic_get_property() vfuncs
for any modeset objects with driver specific properties.
</para></listitem>
</varlistentry>
</variablelist>
</sect3>
<sect3>
@ -1377,7 +1385,7 @@ int max_width, max_height;</synopsis>
<itemizedlist>
<listitem>
DRM_PLANE_TYPE_PRIMARY represents a "main" plane for a CRTC. Primary
planes are the planes operated upon by by CRTC modesetting and flipping
planes are the planes operated upon by CRTC modesetting and flipping
operations described in <xref linkend="drm-kms-crtcops"/>.
</listitem>
<listitem>
@ -2362,6 +2370,7 @@ void intel_crt_init(struct drm_device *dev)
</sect2>
<sect2>
<title>Modeset Helper Functions Reference</title>
!Iinclude/drm/drm_crtc_helper.h
!Edrivers/gpu/drm/drm_crtc_helper.c
!Pdrivers/gpu/drm/drm_crtc_helper.c overview
</sect2>
@ -2564,8 +2573,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
<td rowspan="25" valign="top" >DRM</td>
<td rowspan="4" valign="top" >Generic</td>
<td rowspan="36" valign="top" >DRM</td>
<td rowspan="5" valign="top" >Connector</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@ -2594,7 +2603,14 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Contains tiling information for a connector.</td>
</tr>
<tr>
<td rowspan="1" valign="top" >Plane</td>
<td valign="top" >“CRTC_ID”</td>
<td valign="top" >OBJECT</td>
<td valign="top" >DRM_MODE_OBJECT_CRTC</td>
<td valign="top" >Connector</td>
<td valign="top" >CRTC that connector is attached to (atomic)</td>
</tr>
<tr>
<td rowspan="11" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
<td valign="top" >{ "Overlay", "Primary", "Cursor" }</td>
@ -2602,6 +2618,76 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Plane type</td>
</tr>
<tr>
<td valign="top" >“SRC_X”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout source x coordinate in 16.16 fixed point (atomic)</td>
</tr>
<tr>
<td valign="top" >“SRC_Y”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout source y coordinate in 16.16 fixed point (atomic)</td>
</tr>
<tr>
<td valign="top" >“SRC_W”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout source width in 16.16 fixed point (atomic)</td>
</tr>
<tr>
<td valign="top" >“SRC_H”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout source height in 16.16 fixed point (atomic)</td>
</tr>
<tr>
<td valign="top" >“CRTC_X”</td>
<td valign="top" >SIGNED_RANGE</td>
<td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout CRTC (destination) x coordinate (atomic)</td>
</tr>
<tr>
<td valign="top" >“CRTC_Y”</td>
<td valign="top" >SIGNED_RANGE</td>
<td valign="top" >Min=INT_MIN, Max=INT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout CRTC (destination) y coordinate (atomic)</td>
</tr>
<tr>
<td valign="top" >“CRTC_W”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout CRTC (destination) width (atomic)</td>
</tr>
<tr>
<td valign="top" >“CRTC_H”</td>
<td valign="top" >RANGE</td>
<td valign="top" >Min=0, Max=UINT_MAX</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout CRTC (destination) height (atomic)</td>
</tr>
<tr>
<td valign="top" >“FB_ID”</td>
<td valign="top" >OBJECT</td>
<td valign="top" >DRM_MODE_OBJECT_FB</td>
<td valign="top" >Plane</td>
<td valign="top" >Scanout framebuffer (atomic)</td>
</tr>
<tr>
<td valign="top" >“CRTC_ID”</td>
<td valign="top" >OBJECT</td>
<td valign="top" >DRM_MODE_OBJECT_CRTC</td>
<td valign="top" >Plane</td>
<td valign="top" >CRTC that plane is attached to (atomic)</td>
</tr>
<tr>
<td rowspan="2" valign="top" >DVI-I</td>
<td valign="top" >“subconnector”</td>
<td valign="top" >ENUM</td>
@ -3883,6 +3969,7 @@ int num_ioctls;</synopsis>
<title>Runtime Power Management</title>
!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
!Idrivers/gpu/drm/i915/intel_runtime_pm.c
!Idrivers/gpu/drm/i915/intel_uncore.c
</sect2>
<sect2>
<title>Interrupt Handling</title>
@ -3931,6 +4018,11 @@ int num_ioctls;</synopsis>
framebuffer compression and panel self refresh.
</para>
</sect2>
<sect2>
<title>Atomic Plane Helpers</title>
!Pdrivers/gpu/drm/i915/intel_atomic_plane.c atomic plane helpers
!Idrivers/gpu/drm/i915/intel_atomic_plane.c
</sect2>
<sect2>
<title>Output Probing</title>
<para>
@ -3949,6 +4041,11 @@ int num_ioctls;</synopsis>
<title>Panel Self Refresh PSR (PSR/SRD)</title>
!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
!Idrivers/gpu/drm/i915/intel_psr.c
</sect2>
<sect2>
<title>Frame Buffer Compression (FBC)</title>
!Pdrivers/gpu/drm/i915/intel_fbc.c Frame Buffer Compression (FBC)
!Idrivers/gpu/drm/i915/intel_fbc.c
</sect2>
<sect2>
<title>DPIO</title>
@ -4052,12 +4149,33 @@ int num_ioctls;</synopsis>
<title>Batchbuffer Parsing</title>
!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
<sect2>
<title>Batchbuffer Pools</title>
!Pdrivers/gpu/drm/i915/i915_gem_batch_pool.c batch pool
!Idrivers/gpu/drm/i915/i915_gem_batch_pool.c
</sect2>
<sect2>
<title>Logical Rings, Logical Ring Contexts and Execlists</title>
!Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
!Idrivers/gpu/drm/i915/intel_lrc.c
</sect2>
<sect2>
<title>Global GTT views</title>
!Pdrivers/gpu/drm/i915/i915_gem_gtt.c Global GTT views
!Idrivers/gpu/drm/i915/i915_gem_gtt.c
</sect2>
<sect2>
<title>Buffer Object Eviction</title>
<para>
This section documents the interface function for evicting buffer
objects to make space available in the virtual gpu address spaces.
Note that this is mostly orthogonal to shrinking buffer objects
caches, which has the goal to make main memory (shared with the gpu
through the unified memory architecture) available.
</para>
!Idrivers/gpu/drm/i915/i915_gem_evict.c
</sect2>
</sect1>
<sect1>

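The plane properties added above (FB_ID, CRTC_ID, SRC_*, CRTC_*) are the
atomic counterparts of the legacy setplane and page-flip paths, with the
SRC_* values in 16.16 fixed point. A minimal userspace sketch of one plane
update through the atomic ioctl, using libdrm's atomic helpers — this
assumes DRM_CLIENT_CAP_ATOMIC has been enabled (the ioctl is still behind
an option in this kernel) and that the property IDs were looked up
beforehand; the plane_props struct and all names are illustrative, not
part of this commit:

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	struct plane_props {	/* property IDs, looked up beforehand */
		uint32_t fb_id, crtc_id;
		uint32_t src_x, src_y, src_w, src_h;
		uint32_t crtc_x, crtc_y, crtc_w, crtc_h;
	};

	static int update_plane(int fd, uint32_t plane,
				const struct plane_props *p,
				uint32_t fb, uint32_t crtc,
				uint32_t w, uint32_t h)
	{
		drmModeAtomicReq *req = drmModeAtomicAlloc();
		int ret;

		if (!req)
			return -ENOMEM;

		drmModeAtomicAddProperty(req, plane, p->fb_id, fb);
		drmModeAtomicAddProperty(req, plane, p->crtc_id, crtc);
		/* source rectangle: 16.16 fixed point */
		drmModeAtomicAddProperty(req, plane, p->src_x, 0);
		drmModeAtomicAddProperty(req, plane, p->src_y, 0);
		drmModeAtomicAddProperty(req, plane, p->src_w, (uint64_t)w << 16);
		drmModeAtomicAddProperty(req, plane, p->src_h, (uint64_t)h << 16);
		/* destination rectangle: integer CRTC coordinates */
		drmModeAtomicAddProperty(req, plane, p->crtc_x, 0);
		drmModeAtomicAddProperty(req, plane, p->crtc_y, 0);
		drmModeAtomicAddProperty(req, plane, p->crtc_w, w);
		drmModeAtomicAddProperty(req, plane, p->crtc_h, h);

		ret = drmModeAtomicCommit(fd, req, 0, NULL);
		drmModeAtomicFree(req);
		return ret;
	}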

@ -0,0 +1,53 @@
Device-Tree bindings for Atmel's HLCDC (High LCD Controller) DRM driver
The Atmel HLCDC Display Controller is a subdevice of the HLCDC MFD device.
See ../mfd/atmel-hlcdc.txt for more details.
Required properties:
- compatible: value should be "atmel,hlcdc-display-controller"
- pinctrl-names: the pin control state names. Should contain "default".
- pinctrl-0: should contain the default pinctrl states.
- #address-cells: should be set to 1.
- #size-cells: should be set to 0.
Required children nodes:
Children nodes are encoding available output ports and their connections
to external devices using the OF graph representation (see ../graph.txt).
At least one port node is required.
Example:
hlcdc: hlcdc@f0030000 {
compatible = "atmel,sama5d3-hlcdc";
reg = <0xf0030000 0x2000>;
interrupts = <36 IRQ_TYPE_LEVEL_HIGH 0>;
clocks = <&lcdc_clk>, <&lcdck>, <&clk32k>;
clock-names = "periph_clk","sys_clk", "slow_clk";
status = "disabled";
hlcdc-display-controller {
compatible = "atmel,hlcdc-display-controller";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lcd_base &pinctrl_lcd_rgb888>;
#address-cells = <1>;
#size-cells = <0>;
port@0 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
hlcdc_panel_output: endpoint@0 {
reg = <0>;
remote-endpoint = <&panel_input>;
};
};
};
hlcdc_pwm: hlcdc-pwm {
compatible = "atmel,hlcdc-pwm";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lcd_pwm>;
#pwm-cells = <3>;
};
};


@ -0,0 +1,50 @@
DesignWare HDMI bridge bindings
Required properties:
- compatible: platform specific such as:
* "snps,dw-hdmi-tx"
* "fsl,imx6q-hdmi"
* "fsl,imx6dl-hdmi"
* "rockchip,rk3288-dw-hdmi"
- reg: Physical base address and length of the controller's registers.
- interrupts: The HDMI interrupt number
- clocks, clock-names: must contain the phandles to the HDMI iahb and isfr
  clocks, as described in Documentation/devicetree/bindings/clock/clock-bindings.txt;
  the clocks are SoC specific, and the clock-names should be "iahb", "isfr"
- port@[X]: SoC specific port nodes with endpoint definitions as defined
in Documentation/devicetree/bindings/media/video-interfaces.txt,
please refer to the SoC specific binding document:
* Documentation/devicetree/bindings/drm/imx/hdmi.txt
* Documentation/devicetree/bindings/video/dw_hdmi-rockchip.txt
Optional properties
- reg-io-width: width of the register accesses in bytes; can be 1 or 4,
  defaults to 1 if not present
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:
hdmi: hdmi@0120000 {
compatible = "fsl,imx6q-hdmi";
reg = <0x00120000 0x9000>;
interrupts = <0 115 0x04>;
gpr = <&gpr>;
clocks = <&clks 123>, <&clks 124>;
clock-names = "iahb", "isfr";
ddc-i2c-bus = <&i2c2>;
port@0 {
reg = <0>;
hdmi_mux_0: endpoint {
remote-endpoint = <&ipu1_di0_hdmi>;
};
};
port@1 {
reg = <1>;
hdmi_mux_1: endpoint {
remote-endpoint = <&ipu1_di1_hdmi>;
};
};
};


@ -2,6 +2,8 @@ Qualcomm adreno/snapdragon hdmi output
Required properties:
- compatible: one of the following
* "qcom,hdmi-tx-8084"
* "qcom,hdmi-tx-8074"
* "qcom,hdmi-tx-8660"
* "qcom,hdmi-tx-8960"
- reg: Physical base address and length of the controller's registers


@ -83,6 +83,22 @@ sti-hda:
- clock-names: names of the clocks listed in clocks property in the same
order.
sti-dvo:
Required properties:
must be a child of sti-tvout
- compatible: "st,stih<chip>-dvo"
- reg: Physical base address of the IP registers and length of memory mapped region.
- reg-names: names of the mapped memory regions listed in regs property in
the same order.
- clocks: from common clock binding: handle hardware IP needed clocks, the
number of clocks may depend of the SoC type.
See ../clocks/clock-bindings.txt for details.
- clock-names: names of the clocks listed in clocks property in the same
order.
- pinctrl-0: pin control handle
- pinctrl-names: names of the pin control states to use
- sti,panel: phandle of the panel connected to the DVO output
sti-hqvdp:
must be a child of sti-display-subsystem
Required properties:
@ -198,6 +214,19 @@ Example:
clock-names = "pix", "hddac";
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
sti-dvo@8d00400 {
compatible = "st,stih407-dvo";
reg = <0x8d00400 0x200>;
reg-names = "dvo-reg";
clock-names = "dvo_pix", "dvo",
"main_parent", "aux_parent";
clocks = <&clk_s_d2_flexgen CLK_PIX_DVO>, <&clk_s_d2_flexgen CLK_DVO>,
<&clk_s_d2_quadfs 0>, <&clk_s_d2_quadfs 1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dvo>;
sti,panel = <&panel_dvo>;
};
};
sti-hqvdp@9c000000 {


@ -0,0 +1,7 @@
Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
Required properties:
- compatible: should be "avic,tm070ddh03"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,7 @@
GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
Required properties:
- compatible: should be "giantplus,gpg48273qs5"
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -25,6 +25,7 @@ asahi-kasei Asahi Kasei Corp.
atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
bosch Bosch Sensortec GmbH
brcm Broadcom Corporation
buffalo Buffalo, Inc.
@ -68,6 +69,7 @@ fsl Freescale Semiconductor
GEFanuc GE Fanuc Intelligent Platforms Embedded Systems, Inc.
gef GE Fanuc Intelligent Platforms Embedded Systems, Inc.
geniatech Geniatech, Inc.
giantplus Giantplus Technology Co., Ltd.
globalscale Globalscale Technologies, Inc.
gmt Global Mixed-mode Technology, Inc.
google Google, Inc.
@ -126,6 +128,7 @@ onnn ON Semiconductor Corp.
opencores OpenCores.org
ovti OmniVision Technologies
panasonic Panasonic Corporation
parade Parade Technologies Inc.
pericom Pericom Technology Inc.
phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd


@ -0,0 +1,31 @@
ps8622-bridge bindings
Required properties:
- compatible: "parade,ps8622" or "parade,ps8625"
- reg: first i2c address of the bridge
- sleep-gpios: OF device-tree gpio specification for PD_ pin.
- reset-gpios: OF device-tree gpio specification for RST_ pin.
Optional properties:
- lane-count: number of DP lanes to use
- use-external-pwm: backlight will be controlled by an external PWM
- video interfaces: Device node can contain video interface port
nodes for panel according to [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
lvds-bridge@48 {
compatible = "parade,ps8622";
reg = <0x48>;
sleep-gpios = <&gpc3 6 1 0 0>;
reset-gpios = <&gpc3 1 1 0 0>;
lane-count = <1>;
ports {
port@0 {
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
};


@ -3,8 +3,8 @@ ptn3460 bridge bindings
Required properties:
- compatible: "nxp,ptn3460"
- reg: i2c address of the bridge
- powerdown-gpio: OF device-tree gpio specification
- reset-gpio: OF device-tree gpio specification
- powerdown-gpio: OF device-tree gpio specification for PD_N pin.
- reset-gpio: OF device-tree gpio specification for RST_N pin.
- edid-emulation: The EDID emulation entry to use
+-------+------------+------------------+
| Value | Resolution | Description |
@ -17,6 +17,11 @@ Required properties:
| 6 | 1600x900 | ChiMei M215HGE |
+-------+------------+------------------+
- video interfaces: Device node can contain video interface port
nodes for panel according to [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
lvds-bridge@20 {
compatible = "nxp,ptn3460";
@ -24,4 +29,11 @@ Example:
powerdown-gpio = <&gpy2 5 1 0 0>;
reset-gpio = <&gpx1 5 1 0 0>;
edid-emulation = <5>;
ports {
port@0 {
bridge_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};
};


@ -0,0 +1,46 @@
Rockchip specific extensions to the Synopsys Designware HDMI
================================
Required properties:
- compatible: "rockchip,rk3288-dw-hdmi";
- reg: Physical base address and length of the controller's registers.
- clocks: phandle to hdmi iahb and isfr clocks.
- clock-names: should be "iahb", "isfr"
- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
- interrupts: HDMI interrupt number
- ports: contain a port node with endpoint definitions as defined in
Documentation/devicetree/bindings/media/video-interfaces.txt. For
vopb, set reg = <0>; for vopl, set reg = <1>.
- reg-io-width: width of the register accesses in bytes (1 or 4); the value
  should be 4 on the rk3288 platform
Optional properties
- ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
- clocks, clock-names: phandle to the HDMI CEC clock, name should be "cec"
Example:
hdmi: hdmi@ff980000 {
compatible = "rockchip,rk3288-dw-hdmi";
reg = <0xff980000 0x20000>;
reg-io-width = <4>;
ddc-i2c-bus = <&i2c5>;
rockchip,grf = <&grf>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_HDMI_CTRL>, <&cru SCLK_HDMI_HDCP>;
clock-names = "iahb", "isfr";
status = "disabled";
ports {
hdmi_in: port {
#address-cells = <1>;
#size-cells = <0>;
hdmi_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_hdmi>;
};
hdmi_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_hdmi>;
};
};
};
};


@ -0,0 +1,68 @@
Device-Tree bindings for Samsung Exynos7 SoC display controller (DECON)
DECON (Display and Enhancement Controller) is the display controller for the
Exynos7 series of SoCs, which transfers image data from a video memory
buffer to an external LCD interface.
Required properties:
- compatible: value should be "samsung,exynos7-decon";
- reg: physical base address and length of the DECON registers set.
- interrupt-parent: should be the phandle of the decon controller's
parent interrupt controller.
- interrupts: should contain a list of all DECON IP block interrupts in the
order: FIFO Level, VSYNC, LCD_SYSTEM. The interrupt specifier
format depends on the interrupt controller used.
- interrupt-names: should contain the interrupt names: "fifo", "vsync",
"lcd_sys", in the same order as they were listed in the interrupts
property.
- pinctrl-0: pin control group to be used for this controller.
- pinctrl-names: must contain a "default" entry.
- clocks: must include clock specifiers corresponding to entries in the
clock-names property.
- clock-names: list of clock names sorted in the same order as the clocks
property. Must contain "pclk_decon0", "aclk_decon0",
"decon0_eclk", "decon0_vclk".
- i80-if-timings: timing configuration for lcd i80 interface support.
Optional Properties:
- samsung,power-domain: a phandle to DECON power domain node.
- display-timings: timing settings for DECON, as described in document [1].
Can be used in case timings cannot be provided otherwise
or to override timings provided by the panel.
[1]: Documentation/devicetree/bindings/video/display-timing.txt
Example:
SoC specific DT entry:
decon@13930000 {
compatible = "samsung,exynos7-decon";
interrupt-parent = <&combiner>;
reg = <0x13930000 0x1000>;
interrupt-names = "lcd_sys", "vsync", "fifo";
interrupts = <0 188 0>, <0 189 0>, <0 190 0>;
clocks = <&clock_disp PCLK_DECON_INT>,
<&clock_disp ACLK_DECON_INT>,
<&clock_disp SCLK_DECON_INT_ECLK>,
<&clock_disp SCLK_DECON_INT_EXTCLKPLL>;
clock-names = "pclk_decon0", "aclk_decon0", "decon0_eclk",
"decon0_vclk";
status = "disabled";
};
Board specific DT entry:
decon@13930000 {
pinctrl-0 = <&lcd_clk &pwm1_out>;
pinctrl-names = "default";
status = "okay";
};


@ -66,6 +66,10 @@ Optional properties for dp-controller:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug
detection
-video interfaces: Device node can contain video interface port
nodes according to [1].
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
@ -105,4 +109,12 @@ Board Specific portion:
vsync-len = <6>;
};
};
ports {
port@0 {
dp_out: endpoint {
remote-endpoint = <&bridge_in>;
};
};
};
};


@ -15,6 +15,7 @@ Required properties:
a) mixer: Gate of Mixer IP bus clock.
b) sclk_hdmi: HDMI Special clock, one of the two possible inputs of
mixer mux.
c) hdmi: Gate of HDMI IP bus clock, needed together with sclk_hdmi.
Example:


@ -26,6 +26,10 @@ Required Properties:
per LVDS encoder. The functional clocks must be named "du.x" with "x"
being the channel numerical index. The LVDS clocks must be named
"lvds.x" with "x" being the LVDS encoder numerical index.
- In addition to the functional and encoder clocks, all DU versions also
support externally supplied pixel clocks. Those clocks are optional.
When supplied they must be named "dclkin.x" with "x" being the input
clock numerical index.
Required nodes:


@ -630,6 +630,8 @@ L: dri-devel@lists.freedesktop.org
T: git git://people.freedesktop.org/~gabbayo/linux.git
S: Supported
F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
F: drivers/gpu/drm/radeon/radeon_kfd.c
F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h


@ -219,7 +219,10 @@ struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
/* generic functions for user-populated AGP memory types */
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type);
void agp_alloc_page_array(size_t size, struct agp_memory *mem);
void agp_free_page_array(struct agp_memory *mem);
static inline void agp_free_page_array(struct agp_memory *mem)
{
kvfree(mem->pages);
}
/* generic routines for agp>=3 */


@ -98,17 +98,6 @@ void agp_alloc_page_array(size_t size, struct agp_memory *mem)
}
EXPORT_SYMBOL(agp_alloc_page_array);
void agp_free_page_array(struct agp_memory *mem)
{
if (is_vmalloc_addr(mem->pages)) {
vfree(mem->pages);
} else {
kfree(mem->pages);
}
}
EXPORT_SYMBOL(agp_free_page_array);
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
struct agp_memory *new;

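The two hunks above collapse the explicit is_vmalloc_addr() branch into
kvfree(), which frees memory obtained from either kmalloc() or vmalloc().
A sketch of the matching allocate/free pair, mirroring what
agp_alloc_page_array() does (illustrative names, not code from this commit):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/mm.h>

	/* Try a physically contiguous allocation first and fall back to
	 * vmalloc for large arrays; kvfree() handles both cases on free. */
	static struct page **alloc_page_array(size_t size)
	{
		struct page **pages;

		pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
		if (!pages)
			pages = vmalloc(size);
		return pages;
	}

	static void free_page_array(struct page **pages)
	{
		kvfree(pages);	/* picks kfree() or vfree() as appropriate */
	}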

@ -225,7 +225,7 @@ static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
intel_private.driver->write_entry(addr,
i, type);
}
readl(intel_private.gtt+i-1);
wmb();
return 0;
}
@ -329,7 +329,7 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
break;
}
writel(addr | pte_flags, intel_private.gtt + entry);
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
@ -735,7 +735,7 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED;
writel(addr | pte_flags, intel_private.gtt + entry);
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
bool intel_enable_gtt(void)
@ -858,7 +858,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st,
j++;
}
}
readl(intel_private.gtt+j-1);
wmb();
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
@ -875,7 +875,7 @@ static void intel_gtt_insert_pages(unsigned int first_entry,
intel_private.driver->write_entry(addr,
j, flags);
}
readl(intel_private.gtt+j-1);
wmb();
}
static int intel_fake_agp_insert_entries(struct agp_memory *mem,
@ -938,7 +938,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
wmb();
}
EXPORT_SYMBOL(intel_gtt_clear_range);
@ -1106,7 +1106,7 @@ static void i965_write_entry(dma_addr_t addr,
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
writel(addr | pte_flags, intel_private.gtt + entry);
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
static int i9xx_setup(void)

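The intel-gtt conversions above drop the per-entry writel() and the trailing
posting read of the GTT in favour of writel_relaxed() plus a single wmb(),
so a batch of PTE updates pays for one barrier rather than one per write.
A sketch of the resulting pattern (assumes an ioremapped GTT base;
illustrative, not the exact driver code):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Write n PTEs with no per-write ordering, then publish them all
	 * with one write memory barrier. */
	static void write_gtt_entries(u32 __iomem *gtt, unsigned int first,
				      const u32 *ptes, unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			writel_relaxed(ptes[i], gtt + first + i);
		wmb();	/* order the PTE writes before any later GTT access */
	}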

@ -1,3 +1,6 @@
obj-y += drm/ vga/
# drm/tegra depends on host1x, so if both drivers are built-in care must be
# taken to initialize them in the correct order. Link order is the only way
# to ensure this currently.
obj-$(CONFIG_TEGRA_HOST1X) += host1x/
obj-y += drm/ vga/
obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/


@ -62,12 +62,13 @@ config DRM_TTM
config DRM_GEM_CMA_HELPER
bool
depends on DRM
depends on DRM && HAVE_DMA_ATTRS
help
Choose this if you need the GEM CMA helper functions
config DRM_KMS_CMA_HELPER
bool
depends on DRM && HAVE_DMA_ATTRS
select DRM_GEM_CMA_HELPER
select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
@ -110,7 +111,6 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select MMU_NOTIFIER
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@ -183,6 +183,8 @@ source "drivers/gpu/drm/cirrus/Kconfig"
source "drivers/gpu/drm/armada/Kconfig"
source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
source "drivers/gpu/drm/rcar-du/Kconfig"
source "drivers/gpu/drm/shmobile/Kconfig"


@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o
drm_modeset_lock.o drm_atomic.o drm_bridge.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@ -55,6 +55,7 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
obj-$(CONFIG_DRM_ARMADA) += armada/
obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc/
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du/
obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/


@ -7,7 +7,10 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
kfd_process.o kfd_queue.o kfd_mqd_manager.o \
kfd_kernel_queue.o kfd_packet_manager.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o
kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \
kfd_kernel_queue.o kfd_kernel_queue_cik.o \
kfd_kernel_queue_vi.o kfd_packet_manager.o \
kfd_process_queue_manager.o kfd_device_queue_manager.o \
kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \
obj-$(CONFIG_HSA_AMD) += amdkfd.o


@ -168,6 +168,8 @@
#define IB_ATC_EN (1U << 23)
#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
#define AQL_ENABLE 1
#define CP_HQD_DEQUEUE_REQUEST 0xC974
#define DEQUEUE_REQUEST_DRAIN 1
#define DEQUEUE_REQUEST_RESET 2
@ -188,6 +190,17 @@
#define MQD_VMID_MASK (0xf << 0)
#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
#define SDMA_RB_VMID(x) (x << 24)
#define SDMA_RB_ENABLE (1 << 0)
#define SDMA_RB_SIZE(x) ((x) << 1) /* log2 */
#define SDMA_RPTR_WRITEBACK_ENABLE (1 << 12)
#define SDMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
#define SDMA_OFFSET(x) (x << 0)
#define SDMA_DB_ENABLE (1 << 28)
#define SDMA_ATC (1 << 0)
#define SDMA_VA_PTR32 (1 << 4)
#define SDMA_VA_SHARED_BASE(x) (x << 8)
#define GRBM_GFX_INDEX 0x30800
#define INSTANCE_INDEX(x) ((x) << 0)
#define SH_INDEX(x) ((x) << 8)


@ -178,6 +178,22 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return -EFAULT;
}
if (args->eop_buffer_address &&
!access_ok(VERIFY_WRITE,
(const void __user *) args->eop_buffer_address,
sizeof(uint32_t))) {
pr_debug("kfd: can't access eop buffer");
return -EFAULT;
}
if (args->ctx_save_restore_address &&
!access_ok(VERIFY_WRITE,
(const void __user *) args->ctx_save_restore_address,
sizeof(uint32_t))) {
pr_debug("kfd: can't access ctx save restore buffer");
return -EFAULT;
}
q_properties->is_interop = false;
q_properties->queue_percent = args->queue_percentage;
q_properties->priority = args->queue_priority;
@ -185,9 +201,16 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
q_properties->queue_size = args->ring_size;
q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
q_properties->eop_ring_buffer_address = args->eop_buffer_address;
q_properties->eop_ring_buffer_size = args->eop_buffer_size;
q_properties->ctx_save_restore_area_address =
args->ctx_save_restore_address;
q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;
if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
q_properties->type = KFD_QUEUE_TYPE_SDMA;
else
return -ENOTSUPP;
@ -214,6 +237,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
pr_debug("Queue Format (%d)\n", q_properties->format);
pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);
pr_debug("Queue CTX save arex (0x%llX)\n",
q_properties->ctx_save_restore_area_address);
return 0;
}
@ -235,9 +263,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err)
return err;
pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
if (dev == NULL) {
pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
return -EINVAL;
}
mutex_lock(&p->mutex);
@ -251,8 +282,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
p->pasid,
dev->id);
err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
KFD_QUEUE_TYPE_COMPUTE, &queue_id);
err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
0, q_properties.type, &queue_id);
if (err != 0)
goto err_create_queue;
@ -385,7 +416,7 @@ static int kfd_ioctl_set_memory_policy(struct file *filep,
(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,


@ -31,11 +31,20 @@
#define MQD_SIZE_ALIGNED 768
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
.max_pasid_bits = 16,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
static const struct kfd_device_info carrizo_device_info = {
.asic_family = CHIP_CARRIZO,
.max_pasid_bits = 16,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED
};
struct kfd_deviceid {
unsigned short did;
const struct kfd_device_info *device_info;
@ -64,9 +73,13 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x1318, &kaveri_device_info }, /* Kaveri */
{ 0x131B, &kaveri_device_info }, /* Kaveri */
{ 0x131C, &kaveri_device_info }, /* Kaveri */
{ 0x131D, &kaveri_device_info }, /* Kaveri */
{ 0x131D, &kaveri_device_info } /* Kaveri */
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
@ -173,16 +186,39 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
/*
* calculate max size of runlist packet.
* There can be only 2 packets at once
*/
size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
max_num_of_queues_per_device *
sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
dev_err(kfd_device,
"Error initializing sa manager for device (%x:%x)\n",
kfd->pdev->vendor, kfd->pdev->device);
"Could not allocate %d bytes for device (%x:%x)\n",
size, kfd->pdev->vendor, kfd->pdev->device);
goto out;
}
dev_info(kfd_device,
"Allocated %d bytes on gart for device(%x:%x)\n",
size, kfd->pdev->vendor, kfd->pdev->device);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
dev_err(kfd_device,
"Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
kfd_doorbell_init(kfd);
if (kfd_topology_add_device(kfd) != 0) {
@ -209,7 +245,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
if (kfd->dqm->start(kfd->dqm) != 0) {
if (kfd->dqm->ops.start(kfd->dqm) != 0) {
dev_err(kfd_device,
"Error starting queuen manager for device (%x:%x)\n",
kfd->pdev->vendor, kfd->pdev->device);
@ -232,7 +268,9 @@ device_queue_manager_error:
device_iommu_pasid_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd2kgd->fini_sa_manager(kfd->kgd);
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device (%x:%x) NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
@ -246,6 +284,8 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
kfd_topology_remove_device(kfd);
kfd_gtt_sa_fini(kfd);
kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
@ -256,7 +296,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd)
BUG_ON(kfd == NULL);
if (kfd->init_complete) {
kfd->dqm->stop(kfd->dqm);
kfd->dqm->ops.stop(kfd->dqm);
amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
amd_iommu_free_device(kfd->pdev);
}
@ -277,7 +317,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
return -ENXIO;
amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
iommu_pasid_shutdown_callback);
kfd->dqm->start(kfd->dqm);
kfd->dqm->ops.start(kfd->dqm);
}
return 0;
@ -288,3 +328,188 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
/* Process interrupts / schedule work as necessary */
}
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
unsigned int num_of_bits;
BUG_ON(!kfd);
BUG_ON(!kfd->gtt_mem);
BUG_ON(buf_size < chunk_size);
BUG_ON(buf_size == 0);
BUG_ON(chunk_size == 0);
kfd->gtt_sa_chunk_size = chunk_size;
kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;
num_of_bits = kfd->gtt_sa_num_of_chunks / BITS_PER_BYTE;
BUG_ON(num_of_bits == 0);
kfd->gtt_sa_bitmap = kzalloc(num_of_bits, GFP_KERNEL);
if (!kfd->gtt_sa_bitmap)
return -ENOMEM;
pr_debug("kfd: gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);
mutex_init(&kfd->gtt_sa_lock);
return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
mutex_destroy(&kfd->gtt_sa_lock);
kfree(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
unsigned int bit_num,
unsigned int chunk_size)
{
return start_addr + bit_num * chunk_size;
}
static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
unsigned int bit_num,
unsigned int chunk_size)
{
return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
struct kfd_mem_obj **mem_obj)
{
unsigned int found, start_search, cur_size;
BUG_ON(!kfd);
if (size == 0)
return -EINVAL;
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if ((*mem_obj) == NULL)
return -ENOMEM;
pr_debug("kfd: allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
mutex_lock(&kfd->gtt_sa_lock);
kfd_gtt_restart_search:
/* Find the first chunk that is free */
found = find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks,
start_search);
pr_debug("kfd: found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Update fields of mem_obj */
(*mem_obj)->range_start = found;
(*mem_obj)->range_end = found;
(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
kfd->gtt_start_gpu_addr,
found,
kfd->gtt_sa_chunk_size);
(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
kfd->gtt_start_cpu_ptr,
found,
kfd->gtt_sa_chunk_size);
pr_debug("kfd: gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
pr_debug("kfd: single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
/* Otherwise, try to see if we have enough contiguous chunks */
cur_size = size - kfd->gtt_sa_chunk_size;
do {
(*mem_obj)->range_end =
find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks, ++found);
/*
* If the next free chunk is not contiguous, then we need to
* restart our search from the last free chunk we found (which
* wasn't contiguous to the previous ones)
*/
if ((*mem_obj)->range_end != found) {
start_search = found;
goto kfd_gtt_restart_search;
}
/*
* If we reached end of buffer, bail out with error
*/
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Check if we don't need another chunk */
if (cur_size <= kfd->gtt_sa_chunk_size)
cur_size = 0;
else
cur_size -= kfd->gtt_sa_chunk_size;
} while (cur_size > 0);
pr_debug("kfd: range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
for (found = (*mem_obj)->range_start;
found <= (*mem_obj)->range_end;
found++)
set_bit(found, kfd->gtt_sa_bitmap);
kfd_gtt_out:
mutex_unlock(&kfd->gtt_sa_lock);
return 0;
kfd_gtt_no_free_chunk:
pr_debug("kfd: allocation failed with mem_obj = %p\n", mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return -ENOMEM;
}
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
unsigned int bit;
BUG_ON(!kfd);
/* Act like kfree when trying to free a NULL object */
if (!mem_obj)
return 0;
pr_debug("kfd: free mem_obj = %p, range_start = %d, range_end = %d\n",
mem_obj, mem_obj->range_start, mem_obj->range_end);
mutex_lock(&kfd->gtt_sa_lock);
/* Mark the chunks as free */
for (bit = mem_obj->range_start;
bit <= mem_obj->range_end;
bit++)
clear_bit(bit, kfd->gtt_sa_bitmap);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(mem_obj);
return 0;
}

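The new kfd_gtt_sa_allocate()/kfd_gtt_sa_free() pair replaces the radeon
sa_manager calls: the GART buffer is carved into fixed 512-byte chunks
tracked by a bitmap, and an allocation claims the first contiguous run of
free chunks large enough for the request. A minimal usage sketch (assumes
an initialized struct kfd_dev *kfd; illustrative only):

	#include <linux/string.h>
	#include "kfd_priv.h"

	static int gtt_sa_example(struct kfd_dev *kfd)
	{
		struct kfd_mem_obj *mem_obj;
		int ret;

		/* 2048 bytes rounds up to four contiguous 512-byte chunks */
		ret = kfd_gtt_sa_allocate(kfd, 2048, &mem_obj);
		if (ret != 0)
			return ret;

		/* mem_obj->cpu_ptr is the CPU mapping of the range,
		 * mem_obj->gpu_addr its GART address for the GPU */
		memset(mem_obj->cpu_ptr, 0, 2048);

		kfd_gtt_sa_free(kfd, mem_obj);	/* marks the chunks free again */
		return 0;
	}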

@ -26,34 +26,40 @@
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "../../radeon/cik_reg.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static bool is_mem_initialized;
static int init_memory(struct device_queue_manager *dqm);
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
unsigned int pasid, unsigned int vmid);
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd);
static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id);
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
if (type == KFD_QUEUE_TYPE_SDMA)
return KFD_MQD_TYPE_SDMA;
return KFD_MQD_TYPE_CP;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
@ -67,61 +73,7 @@ static inline unsigned int get_pipes_num_cpsch(void)
return PIPE_PER_ME_CP_SCHEDULING;
}
static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
uint32_t nybble;
nybble = (pdd->lds_base >> 60) & 0x0E;
return nybble;
}
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
unsigned int shared_base;
shared_base = (pdd->lds_base >> 16) & 0xFF;
return shared_base;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
static void init_process_memory(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
BUG_ON(!dqm || !qpd);
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
DEFAULT_MTYPE(MTYPE_NONCACHED) |
APE1_MTYPE(MTYPE_NONCACHED);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
if (qpd->pqm->process->is_32bit_user_mode) {
temp = get_sh_mem_bases_32(pdd);
qpd->sh_mem_bases = SHARED_BASE(temp);
qpd->sh_mem_config |= PTR32;
} else {
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
}
pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
}
static void program_sh_mem_settings(struct device_queue_manager *dqm,
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
@ -200,7 +152,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
*allocated_vmid = qpd->vmid;
q->properties.vmid = qpd->vmid;
retval = create_compute_queue_nocpsch(dqm, q, qpd);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
retval = create_compute_queue_nocpsch(dqm, q, qpd);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
retval = create_sdma_queue_nocpsch(dqm, q, qpd);
if (retval != 0) {
if (list_empty(&qpd->queues_list)) {
@ -212,7 +167,11 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
}
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
if (q->properties.is_active)
dqm->queue_count++;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
@ -229,12 +188,12 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
bool set;
int pipe, bit;
int pipe, bit, i;
set = false;
for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
pipe = (pipe + 1) % get_pipes_num(dqm)) {
for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
if (dqm->allocated_queues[pipe] != 0) {
bit = find_first_bit(
(unsigned long *)&dqm->allocated_queues[pipe],
@ -275,7 +234,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !q || !qpd);
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL)
return -ENOMEM;
@ -319,28 +278,44 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In Func %s\n", __func__);
mutex_lock(&dqm->lock);
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
if (mqd == NULL) {
retval = -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL) {
retval = -ENOMEM;
goto out;
}
deallocate_hqd(dqm, q);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
if (mqd == NULL) {
retval = -ENOMEM;
goto out;
}
dqm->sdma_queue_count--;
deallocate_sdma_queue(dqm, q->sdma_id);
} else {
pr_debug("q->properties.type is invalid (%d)\n",
q->properties.type);
retval = -EINVAL;
goto out;
}
retval = mqd->destroy_mqd(mqd, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
q->pipe, q->queue);
if (retval != 0)
goto out;
deallocate_hqd(dqm, q);
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
/*
* Unconditionally decrement this counter, regardless of the queue's
@ -364,7 +339,8 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
BUG_ON(!dqm || !q || !q->mqd);
mutex_lock(&dqm->lock);
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@ -415,6 +391,7 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct device_process_node *n;
int retval;
BUG_ON(!dqm || !qpd);
@ -429,12 +406,13 @@ static int register_process_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
list_add(&n->list, &dqm->queues);
init_process_memory(dqm, qpd);
retval = dqm->ops_asic_specific.register_process(dqm, qpd);
dqm->processes_count++;
mutex_unlock(&dqm->lock);
return 0;
return retval;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
@ -479,48 +457,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
vmid);
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
* scratch and GPUVM apertures.
* The hardware fills in the remaining 59 bits according to the
* following pattern:
* LDS: X0000000'00000000 - X0000001'00000000 (4GB)
* Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
* GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
*
* (where X/Y is the configurable nybble with the low-bit 0)
*
* LDS and scratch will have the same top nybble programmed in the
* top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
* GPUVM can have a different top nybble programmed in the
* top 3 bits of SH_MEM_BASES.SHARED_BASE.
* We don't bother to support different top nybbles
* for LDS/Scratch and GPUVM.
*/
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
SHARED_BASE(top_address_nybble << 12);
}
static int init_memory(struct device_queue_manager *dqm)
{
int i, retval;
for (i = 8; i < 16; i++)
set_pasid_vmid_mapping(dqm, 0, i);
retval = kfd2kgd->init_memory(dqm->dev->kgd);
if (retval == 0)
is_mem_initialized = true;
return retval;
}
static int init_pipelines(struct device_queue_manager *dqm,
int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe)
{
void *hpdptr;
@ -539,11 +476,8 @@ static int init_pipelines(struct device_queue_manager *dqm,
* because it contains no data when there are no active queues.
*/
err = kfd2kgd->allocate_mem(dqm->dev->kgd,
CIK_HPD_EOP_BYTES * pipes_num,
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &dqm->pipeline_mem);
err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
&dqm->pipeline_mem);
if (err) {
pr_err("kfd: error allocate vidmem num pipes: %d\n",
@ -556,10 +490,9 @@ static int init_pipelines(struct device_queue_manager *dqm,
memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (mqd == NULL) {
kfd2kgd->free_mem(dqm->dev->kgd,
(struct kgd_mem *) dqm->pipeline_mem);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
return -ENOMEM;
}
@ -579,7 +512,6 @@ static int init_pipelines(struct device_queue_manager *dqm,
return 0;
}
static int init_scheduler(struct device_queue_manager *dqm)
{
int retval;
@ -589,11 +521,6 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
retval = init_memory(dqm);
return retval;
}
@ -609,6 +536,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues) {
@ -620,6 +548,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
init_scheduler(dqm);
return 0;
@ -637,8 +566,7 @@ static void uninitialize_nocpsch(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqds[i]);
mutex_destroy(&dqm->lock);
kfd2kgd->free_mem(dqm->dev->kgd,
(struct kgd_mem *) dqm->pipeline_mem);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
@ -651,6 +579,77 @@ static int stop_nocpsch(struct device_queue_manager *dqm)
return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int *sdma_queue_id)
{
int bit;
if (dqm->sdma_bitmap == 0)
return -ENOMEM;
bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
CIK_SDMA_QUEUES);
clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
*sdma_queue_id = bit;
return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id)
{
if (sdma_queue_id >= CIK_SDMA_QUEUES)
return;
set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
uint32_t value = SDMA_ATC;
if (q->process->is_32bit_user_mode)
value |= SDMA_VA_PTR32 | get_sh_mem_bases_32(qpd_to_pdd(qpd));
else
value |= SDMA_VA_SHARED_BASE(get_sh_mem_bases_nybble_64(
qpd_to_pdd(qpd)));
q->properties.sdma_vm_addr = value;
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
struct mqd_manager *mqd;
int retval;
mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
if (!mqd)
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval != 0)
return retval;
q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;
pr_debug("kfd: sdma id is: %d\n", q->sdma_id);
pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id);
pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id);
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0) {
deallocate_sdma_queue(dqm, q->sdma_id);
return retval;
}
init_sdma_vm(dqm, q, qpd);
return 0;
}
/*
* Device Queue Manager implementation for cp scheduler
*/
@ -692,8 +691,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
retval = dqm->ops_asic_specific.initialize(dqm);
if (retval != 0)
goto fail_init_pipelines;
@ -724,18 +724,14 @@ static int start_cpsch(struct device_queue_manager *dqm)
pr_debug("kfd: allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
sizeof(*dqm->fence_addr),
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &dqm->fence_mem);
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
&dqm->fence_mem);
if (retval != 0)
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
list_for_each_entry(node, &dqm->queues, list)
if (node->qpd->pqm->process && dqm->dev)
kfd_bind_process_to_device(dqm->dev,
@ -764,8 +760,7 @@ static int stop_cpsch(struct device_queue_manager *dqm)
pdd = qpd_to_pdd(node->qpd);
pdd->bound = false;
}
kfd2kgd->free_mem(dqm->dev->kgd,
(struct kgd_mem *) dqm->fence_mem);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
return 0;
@ -828,6 +823,14 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
mutex_unlock(&dqm->lock);
}
static void select_sdma_engine_id(struct queue *q)
{
static int sdma_id;
q->sdma_id = sdma_id;
sdma_id = (sdma_id + 1) % 2;
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd, int *allocate_vmid)
{
@ -850,7 +853,12 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
goto out;
}
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
select_sdma_engine_id(q);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
return -ENOMEM;
@ -867,6 +875,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@ -893,12 +903,20 @@ static int fence_wait_timeout(unsigned int *fence_addr,
pr_err("kfd: qcm fence wait loop timeout expired\n");
return -ETIME;
}
cpu_relax();
schedule();
}
return 0;
}
static int destroy_sdma_queues(struct device_queue_manager *dqm,
unsigned int sdma_engine)
{
return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
sdma_engine);
}
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
int retval;
@ -911,6 +929,15 @@ static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
mutex_lock(&dqm->lock);
if (dqm->active_runlist == false)
goto out;
pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
if (dqm->sdma_queue_count > 0) {
destroy_sdma_queues(dqm, 0);
destroy_sdma_queues(dqm, 1);
}
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
if (retval != 0)
@ -982,15 +1009,19 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
/* remove queue from list to prevent rescheduling after preemption */
mutex_lock(&dqm->lock);
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
mqd = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
if (!mqd) {
retval = -ENOMEM;
goto failed;
}
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
dqm->sdma_queue_count--;
list_del(&q->list);
dqm->queue_count--;
if (q->properties.is_active)
dqm->queue_count--;
execute_queues_cpsch(dqm, false);
@ -1028,8 +1059,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
bool retval;
pr_debug("kfd: In func %s\n", __func__);
@ -1066,18 +1096,13 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit = limit >> 16;
}
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
ape1_mtype = (alternate_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
retval = dqm->ops_asic_specific.set_cache_memory_policy(
dqm,
qpd,
default_policy,
alternate_policy,
alternate_aperture_base,
alternate_aperture_size);
if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
@@ -1087,7 +1112,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
mutex_unlock(&dqm->lock);
return true;
return retval;
out:
mutex_unlock(&dqm->lock);
@@ -1100,6 +1125,8 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
BUG_ON(!dev);
pr_debug("kfd: loading device queue manager\n");
dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
if (!dqm)
return NULL;
@@ -1109,40 +1136,50 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
dqm->create_queue = create_queue_cpsch;
dqm->initialize = initialize_cpsch;
dqm->start = start_cpsch;
dqm->stop = stop_cpsch;
dqm->destroy_queue = destroy_queue_cpsch;
dqm->update_queue = update_queue;
dqm->get_mqd_manager = get_mqd_manager_nocpsch;
dqm->register_process = register_process_nocpsch;
dqm->unregister_process = unregister_process_nocpsch;
dqm->uninitialize = uninitialize_nocpsch;
dqm->create_kernel_queue = create_kernel_queue_cpsch;
dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.create_queue = create_queue_cpsch;
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
dqm->ops.register_process = register_process_nocpsch;
dqm->ops.unregister_process = unregister_process_nocpsch;
dqm->ops.uninitialize = uninitialize_nocpsch;
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
dqm->start = start_nocpsch;
dqm->stop = stop_nocpsch;
dqm->create_queue = create_queue_nocpsch;
dqm->destroy_queue = destroy_queue_nocpsch;
dqm->update_queue = update_queue;
dqm->get_mqd_manager = get_mqd_manager_nocpsch;
dqm->register_process = register_process_nocpsch;
dqm->unregister_process = unregister_process_nocpsch;
dqm->initialize = initialize_nocpsch;
dqm->uninitialize = uninitialize_nocpsch;
dqm->set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
dqm->ops.register_process = register_process_nocpsch;
dqm->ops.unregister_process = unregister_process_nocpsch;
dqm->ops.initialize = initialize_nocpsch;
dqm->ops.uninitialize = uninitialize_nocpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
break;
default:
BUG();
break;
}
if (dqm->initialize(dqm) != 0) {
switch (dev->device_info->asic_family) {
case CHIP_CARRIZO:
device_queue_manager_init_vi(&dqm->ops_asic_specific);
break;
case CHIP_KAVERI:
device_queue_manager_init_cik(&dqm->ops_asic_specific);
break;
}
if (dqm->ops.initialize(dqm) != 0) {
kfree(dqm);
return NULL;
}
@@ -1154,7 +1191,6 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
BUG_ON(!dqm);
dqm->uninitialize(dqm);
dqm->ops.uninitialize(dqm);
kfree(dqm);
}


@@ -36,6 +36,9 @@
#define KFD_VMID_START_OFFSET (8)
#define VMID_PER_DEVICE CIK_VMID_NUM
#define KFD_DQM_FIRST_PIPE (0)
#define CIK_SDMA_QUEUES (4)
#define CIK_SDMA_QUEUES_PER_ENGINE (2)
#define CIK_SDMA_ENGINE_NUM (2)
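/* i.e. CIK_SDMA_QUEUES == CIK_SDMA_ENGINE_NUM * CIK_SDMA_QUEUES_PER_ENGINE (2 * 2 = 4) */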
struct device_process_node {
struct qcm_process_device *qpd;
@@ -43,7 +46,7 @@ struct device_process_node {
};
/**
* struct device_queue_manager
* struct device_queue_manager_ops
*
* @create_queue: Queue creation routine.
*
@@ -78,15 +81,9 @@ struct device_process_node {
* @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for the
* memory apertures.
*
* This struct is a base class for the kfd queues scheduler in the
* device level. The device base class should expose the basic operations
* for queue creation and queue destruction. This base class hides the
* scheduling mode of the driver and the specific implementation of the
* concrete device. This class is the only class in the queues scheduler
* that configures the H/W.
*/
struct device_queue_manager {
struct device_queue_manager_ops {
int (*create_queue)(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd,
@@ -121,7 +118,23 @@ struct device_queue_manager {
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
};
/**
* struct device_queue_manager
*
* This struct is a base class for the kfd queue scheduler at the
* device level. It exposes the basic operations for queue creation and
* queue destruction, and it hides both the scheduling mode of the
* driver and the specific implementation of the concrete device. This
* is the only class in the queue scheduler that configures the H/W.
*
*/
struct device_queue_manager {
struct device_queue_manager_ops ops;
struct device_queue_manager_ops ops_asic_specific;
struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
@@ -130,9 +143,11 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int sdma_bitmap;
unsigned int vmid_bitmap;
uint64_t pipelines_addr;
struct kfd_mem_obj *pipeline_mem;
@@ -142,6 +157,28 @@ struct device_queue_manager {
bool active_runlist;
};
void device_queue_manager_init_cik(struct device_queue_manager_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
int init_pipelines(struct device_queue_manager *dqm,
unsigned int pipes_num, unsigned int first_pipe);
extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 16) & 0xFF;
}
extern inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
return (pdd->lds_base >> 60) & 0x0E;
}
extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
return dqm->dev->shared_resources.compute_pipe_count;
}
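To make the bit-twiddling in the two base helpers above concrete (hypothetical addresses, purely illustrative):

/* 32-bit process: an LDS aperture base of 0x00AB0000 gives
 *	get_sh_mem_bases_32()        -> (0x00AB0000 >> 16) & 0xFF = 0xAB
 * 64-bit process: an LDS aperture base of 0x2000000000000000 gives
 *	get_sh_mem_bases_nybble_64() -> (base >> 60) & 0x0E = 0x2
 * where the & 0x0E also clears the low bit of the nybble, matching the
 * "low-bit 0" rule documented in compute_sh_mem_bases_64bit() later in
 * this series. */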
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */


@@ -0,0 +1,135 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
#include "cik_regs.h"
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
static int register_process_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int initialize_cpsch_cik(struct device_queue_manager *dqm);
void device_queue_manager_init_cik(struct device_queue_manager_ops *ops)
{
ops->set_cache_memory_policy = set_cache_memory_policy_cik;
ops->register_process = register_process_cik;
ops->initialize = initialize_cpsch_cik;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
/* In 64-bit mode, we can only control the top 3 bits of the LDS,
* scratch and GPUVM apertures.
* The hardware fills in the remaining 59 bits according to the
* following pattern:
* LDS: X0000000'00000000 - X0000001'00000000 (4GB)
* Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
* GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
*
* (where X/Y is the configurable nybble with the low-bit 0)
*
* LDS and scratch will have the same top nybble programmed in the
* top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
* GPUVM can have a different top nybble programmed in the
* top 3 bits of SH_MEM_BASES.SHARED_BASE.
* We don't bother to support different top nybbles
* for LDS/Scratch and GPUVM.
*/
BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
top_address_nybble == 0);
return PRIVATE_BASE(top_address_nybble << 12) |
SHARED_BASE(top_address_nybble << 12);
}
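Continuing that example, feeding nybble 0x2 through the function above (a worked illustration; it assumes PRIVATE_BASE()/SHARED_BASE() simply shift their argument into the two SH_MEM_BASES fields):

/* compute_sh_mem_bases_64bit(0x2)
 *	= PRIVATE_BASE(0x2 << 12) | SHARED_BASE(0x2 << 12)
 *	= PRIVATE_BASE(0x2000)    | SHARED_BASE(0x2000)
 * so LDS/scratch sit at 0x2000000000000000 and GPUVM spans
 * 0x2001000000000000 - 0x2002000000000000, per the layout comment. */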
static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
uint32_t default_mtype;
uint32_t ape1_mtype;
default_mtype = (default_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
ape1_mtype = (alternate_policy == cache_policy_coherent) ?
MTYPE_NONCACHED :
MTYPE_CACHED;
qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
| DEFAULT_MTYPE(default_mtype)
| APE1_MTYPE(ape1_mtype);
return true;
}
static int register_process_cik(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct kfd_process_device *pdd;
unsigned int temp;
BUG_ON(!dqm || !qpd);
pdd = qpd_to_pdd(qpd);
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
qpd->sh_mem_config =
ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
DEFAULT_MTYPE(MTYPE_NONCACHED) |
APE1_MTYPE(MTYPE_NONCACHED);
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
if (qpd->pqm->process->is_32bit_user_mode) {
temp = get_sh_mem_bases_32(pdd);
qpd->sh_mem_bases = SHARED_BASE(temp);
qpd->sh_mem_config |= PTR32;
} else {
temp = get_sh_mem_bases_nybble_64(pdd);
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
}
pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
return 0;
}
static int initialize_cpsch_cik(struct device_queue_manager *dqm)
{
return init_pipelines(dqm, get_pipes_num(dqm), 0);
}


@@ -0,0 +1,64 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_device_queue_manager.h"
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size);
static int register_process_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
static int initialize_cpsch_vi(struct device_queue_manager *dqm);
void device_queue_manager_init_vi(struct device_queue_manager_ops *ops)
{
pr_warn("amdkfd: VI DQM is not currently supported\n");
ops->set_cache_memory_policy = set_cache_memory_policy_vi;
ops->register_process = register_process_vi;
ops->initialize = initialize_cpsch_vi;
}
static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
{
return false;
}
static int register_process_vi(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
return -1;
}
static int initialize_cpsch_vi(struct device_queue_manager *dqm)
{
return 0;
}

View File

@@ -137,10 +137,6 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
if (dev == NULL)
return -EINVAL;
/* Find if pdd exists for combination of process and gpu id */
if (!kfd_get_process_device_data(dev, process, 0))
return -EINVAL;
/* Calculate physical address of doorbell */
address = kfd_get_process_doorbells(dev, process);


@@ -303,10 +303,11 @@ int kfd_init_apertures(struct kfd_process *process)
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
if (!pdd)
pdd = kfd_create_process_device_data(dev, process);
if (pdd == NULL) {
pr_err("Failed to create process device data\n");
return -1;
}
/*
* For 64 bit process aperture will be statically reserved in
* the x86_64 non canonical process address space


@@ -56,8 +56,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_CIK_HIQ);
kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_HIQ);
break;
default:
BUG();
@@ -72,23 +72,19 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
if (prop.doorbell_ptr == NULL)
goto err_get_kernel_doorbell;
retval = kfd2kgd->allocate_mem(dev->kgd,
queue_size,
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->pq);
retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
if (retval != 0)
goto err_pq_allocate_vidmem;
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
retval = kfd2kgd->allocate_mem(dev->kgd,
sizeof(*kq->rptr_kernel),
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->rptr_mem);
retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
if (retval == false)
goto err_eop_allocate_vidmem;
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
&kq->rptr_mem);
if (retval != 0)
goto err_rptr_allocate_vidmem;
@@ -96,11 +92,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
retval = kfd2kgd->allocate_mem(dev->kgd,
sizeof(*kq->wptr_kernel),
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->wptr_mem);
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel),
&kq->wptr_mem);
if (retval != 0)
goto err_wptr_allocate_vidmem;
@@ -121,6 +114,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.queue_address = kq->pq_gpu_addr;
prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
if (init_queue(&kq->queue, prop) != 0)
goto err_init_queue;
@@ -145,11 +140,8 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
} else {
/* allocate fence for DIQ */
retval = kfd2kgd->allocate_mem(dev->kgd,
sizeof(uint32_t),
32,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &kq->fence_mem_obj);
retval = kfd_gtt_sa_allocate(dev, sizeof(uint32_t),
&kq->fence_mem_obj);
if (retval != 0)
goto err_alloc_fence;
@@ -165,11 +157,13 @@ err_alloc_fence:
err_init_mqd:
uninit_queue(kq->queue);
err_init_queue:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
kfd_gtt_sa_free(dev, kq->wptr_mem);
err_wptr_allocate_vidmem:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
kfd_gtt_sa_free(dev, kq->rptr_mem);
err_rptr_allocate_vidmem:
kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
kfd_gtt_sa_free(dev, kq->eop_mem);
err_eop_allocate_vidmem:
kfd_gtt_sa_free(dev, kq->pq);
err_pq_allocate_vidmem:
pr_err("kfd: error init pq\n");
kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
@@ -190,10 +184,13 @@ static void uninitialize(struct kernel_queue *kq)
QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
kq->queue->pipe,
kq->queue->queue);
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
kq->ops_asic_specific.uninitialize(kq);
kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
@@ -265,28 +262,6 @@ static void submit_packet(struct kernel_queue *kq)
kq->pending_wptr);
}
static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
{
unsigned long org_timeout_ms;
BUG_ON(!kq);
org_timeout_ms = timeout_ms;
timeout_ms += jiffies * 1000 / HZ;
while (*kq->wptr_kernel != *kq->rptr_kernel) {
if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
pr_err("kfd: kernel_queue %s timeout expired %lu\n",
__func__, org_timeout_ms);
pr_err("kfd: wptr: %d rptr: %d\n",
*kq->wptr_kernel, *kq->rptr_kernel);
return -ETIME;
}
schedule();
}
return 0;
}
static void rollback_packet(struct kernel_queue *kq)
{
BUG_ON(!kq);
@@ -304,14 +279,23 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq)
return NULL;
kq->initialize = initialize;
kq->uninitialize = uninitialize;
kq->acquire_packet_buffer = acquire_packet_buffer;
kq->submit_packet = submit_packet;
kq->sync_with_hw = sync_with_hw;
kq->rollback_packet = rollback_packet;
kq->ops.initialize = initialize;
kq->ops.uninitialize = uninitialize;
kq->ops.acquire_packet_buffer = acquire_packet_buffer;
kq->ops.submit_packet = submit_packet;
kq->ops.rollback_packet = rollback_packet;
if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
switch (dev->device_info->asic_family) {
case CHIP_CARRIZO:
kernel_queue_init_vi(&kq->ops_asic_specific);
break;
case CHIP_KAVERI:
kernel_queue_init_cik(&kq->ops_asic_specific);
break;
}
if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
pr_err("kfd: failed to init kernel queue\n");
kfree(kq);
return NULL;
@@ -323,7 +307,7 @@ void kernel_queue_uninit(struct kernel_queue *kq)
{
BUG_ON(!kq);
kq->uninitialize(kq);
kq->ops.uninitialize(kq);
kfree(kq);
}
@@ -335,19 +319,18 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
BUG_ON(!dev);
pr_debug("kfd: starting kernel queue test\n");
pr_err("kfd: starting kernel queue test\n");
kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
BUG_ON(!kq);
retval = kq->acquire_packet_buffer(kq, 5, &buffer);
retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
BUG_ON(retval != 0);
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
kq->submit_packet(kq);
kq->sync_with_hw(kq, 1000);
kq->ops.submit_packet(kq);
pr_debug("kfd: ending kernel queue test\n");
pr_err("kfd: ending kernel queue test\n");
}


@@ -28,8 +28,31 @@
#include <linux/types.h>
#include "kfd_priv.h"
struct kernel_queue {
/* interface */
/**
* struct kernel_queue_ops
*
* @initialize: Initialize a kernel queue, including allocations of GART memory
* needed for the queue.
*
* @uninitialize: Uninitialize a kernel queue and free all the memory it uses.
*
* @acquire_packet_buffer: Returns a pointer to the location in the kernel
* queue ring buffer where the calling function can write its packet. It is
* guaranteed that there is enough space for that packet. It also updates the
* pending write pointer to that location so that subsequent calls to
* acquire_packet_buffer will get a correct write pointer.
*
* @submit_packet: Update the write pointer and doorbell of a kernel queue.
*
* @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
* queue are equal, which means the CP has read all the submitted packets.
*
* @rollback_packet: This routine is called if we failed to build an acquired
* packet for some reason. It just overwrites the pending wptr with the current
* one.
*
*/
struct kernel_queue_ops {
bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size);
void (*uninitialize)(struct kernel_queue *kq);
@@ -38,9 +61,12 @@ struct kernel_queue {
unsigned int **buffer_ptr);
void (*submit_packet)(struct kernel_queue *kq);
int (*sync_with_hw)(struct kernel_queue *kq,
unsigned long timeout_ms);
void (*rollback_packet)(struct kernel_queue *kq);
};
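The intended calling pattern for these ops is acquire, fill, then submit or roll back; the packet-manager changes later in this diff follow it exactly. A minimal sketch (error handling trimmed; the validity check is a hypothetical stand-in):

unsigned int *buf;

if (kq->ops.acquire_packet_buffer(kq, size_dw, &buf) == 0) {
	int i;

	for (i = 0; i < size_dw; i++)
		buf[i] = kq->nop_packet;	/* build the packet in place */
	if (packet_ok)				/* hypothetical condition */
		kq->ops.submit_packet(kq);	/* publish wptr, ring doorbell */
	else
		kq->ops.rollback_packet(kq);	/* drop the reservation */
}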
struct kernel_queue {
struct kernel_queue_ops ops;
struct kernel_queue_ops ops_asic_specific;
/* data */
struct kfd_dev *dev;
@@ -58,6 +84,9 @@ struct kernel_queue {
struct kfd_mem_obj *pq;
uint64_t pq_gpu_addr;
uint32_t *pq_kernel_addr;
struct kfd_mem_obj *eop_mem;
uint64_t eop_gpu_addr;
uint32_t *eop_kernel_addr;
struct kfd_mem_obj *fence_mem_obj;
uint64_t fence_gpu_addr;
@@ -66,4 +95,7 @@ struct kernel_queue {
struct list_head list;
};
void kernel_queue_init_cik(struct kernel_queue_ops *ops);
void kernel_queue_init_vi(struct kernel_queue_ops *ops);
#endif /* KFD_KERNEL_QUEUE_H_ */


@@ -0,0 +1,44 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_kernel_queue.h"
static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_cik(struct kernel_queue *kq);
void kernel_queue_init_cik(struct kernel_queue_ops *ops)
{
ops->initialize = initialize_cik;
ops->uninitialize = uninitialize_cik;
}
static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
return true;
}
static void uninitialize_cik(struct kernel_queue *kq)
{
}


@@ -0,0 +1,56 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "kfd_kernel_queue.h"
static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_vi(struct kernel_queue *kq);
void kernel_queue_init_vi(struct kernel_queue_ops *ops)
{
ops->initialize = initialize_vi;
ops->uninitialize = uninitialize_vi;
}
static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
int retval;
retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
if (retval != 0)
return false;
kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
return true;
}
static void uninitialize_vi(struct kernel_queue *kq)
{
kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}


@@ -29,10 +29,10 @@
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
#define KFD_DRIVER_DATE "20141113"
#define KFD_DRIVER_DATE "20150122"
#define KFD_DRIVER_MAJOR 0
#define KFD_DRIVER_MINOR 7
#define KFD_DRIVER_PATCHLEVEL 0
#define KFD_DRIVER_PATCHLEVEL 1
const struct kfd2kgd_calls *kfd2kgd;
static const struct kgd2kfd_calls kgd2kfd = {
@@ -48,7 +48,7 @@ static const struct kgd2kfd_calls kgd2kfd = {
int sched_policy = KFD_SCHED_POLICY_HWS;
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);


@@ -21,326 +21,17 @@
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "../../radeon/cik_reg.h"
inline void busy_wait(unsigned long ms)
{
while (time_before(jiffies, ms))
cpu_relax();
}
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct cik_mqd *m;
int retval;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd2kgd->allocate_mem(mm->dev->kgd,
sizeof(struct cik_mqd),
256,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
addr = (*mqd_mem_obj)->gpu_addr;
memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
/*
* Make sure to use the last queue state saved on mqd when the cp
* reassigns the queue, so when queue is switched on/off (e.g over
* subscription or quantum timeout) the context will be consistent
*/
m->cp_hqd_persistent_state =
DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
/* Although WinKFD writes this, I suspect it should not be necessary */
m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
QUANTUM_DURATION(10);
/*
* Pipe Priority
* Identifies the pipe relative priority when this queue is connected
* to the pipeline. The pipe priority is against the GFX pipe and HP3D.
* In KFD we are using a fixed pipe priority set to CS_MEDIUM.
* 0 = CS_LOW (typically below GFX)
* 1 = CS_MEDIUM (typically between HP3D and GFX)
* 2 = CS_HIGH (typically above HP3D)
*/
m->cp_hqd_pipe_priority = 1;
m->cp_hqd_queue_priority = 15;
*mqd = m;
if (gart_addr != NULL)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
return retval;
}
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
BUG_ON(!mm || !mqd);
kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
/*
* Calculating queue size which is log base 2 of actual queue size -1
* dwords and another -1 for ffs
*/
m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- 1 - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_iq_rptr = AQL_ENABLE;
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
}
m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true;
}
return 0;
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
pipe_id, queue_id);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
/*
* HIQ MQD Implementation, concrete implementation for HIQ MQD implementation.
* The HIQ queue in Kaveri is using the same MQD structure as all the user mode
* queues but with different initial values.
*/
static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct cik_mqd *m;
int retval;
BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd2kgd->allocate_mem(mm->dev->kgd,
sizeof(struct cik_mqd),
256,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
addr = (*mqd_mem_obj)->gpu_addr;
memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
PRELOAD_REQ;
m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
QUANTUM_DURATION(10);
m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
/*
* Pipe Priority
* Identifies the pipe relative priority when this queue is connected
* to the pipeline. The pipe priority is against the GFX pipe and HP3D.
* In KFD we are using a fixed pipe priority set to CS_MEDIUM.
* 0 = CS_LOW (typically below GFX)
* 1 = CS_MEDIUM (typically between HP3D and GFX)
* 2 = CS_HIGH (typically above HP3D)
*/
m->cp_hqd_pipe_priority = 1;
m->cp_hqd_queue_priority = 15;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
return retval;
}
static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
PRIV_STATE |
KMD_QUEUE;
/*
* Calculating queue size which is log base 2 of actual queue
* size -1 dwords
*/
m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- 1 - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true;
}
return 0;
}
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
struct mqd_manager *mqd;
BUG_ON(!dev);
BUG_ON(type >= KFD_MQD_TYPE_MAX);
pr_debug("kfd: In func %s\n", __func__);
mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CIK_CP:
case KFD_MQD_TYPE_CIK_COMPUTE:
mqd->init_mqd = init_mqd;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
break;
case KFD_MQD_TYPE_CIK_HIQ:
mqd->init_mqd = init_mqd_hiq;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
break;
default:
kfree(mqd);
return NULL;
switch (dev->device_info->asic_family) {
case CHIP_KAVERI:
return mqd_manager_init_cik(type, dev);
case CHIP_CARRIZO:
return mqd_manager_init_vi(type, dev);
}
return mqd;
return NULL;
}
/* SDMA queues should be implemented here when the cp will supports them */
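mqd_manager_init() is now a pure per-ASIC dispatcher; the concrete managers live in the new CIK and VI files below. A hedged usage sketch (properties kept minimal, so the resulting MQD stays inactive):

struct mqd_manager *mm = mqd_manager_init(KFD_MQD_TYPE_COMPUTE, dev);

if (mm) {
	struct queue_properties props = { .queue_size = PAGE_SIZE };
	struct kfd_mem_obj *mem;
	uint64_t gart_addr;
	void *mqd;

	if (mm->init_mqd(mm, &mqd, &mem, &gart_addr, &props) == 0)
		mm->uninit_mqd(mm, mqd, mem);	/* and tear it down again */
}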


@@ -0,0 +1,450 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "cik_structs.h"
static inline struct cik_mqd *get_mqd(void *mqd)
{
return (struct cik_mqd *)mqd;
}
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct cik_mqd *m;
int retval;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
addr = (*mqd_mem_obj)->gpu_addr;
memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
/*
* Make sure to use the last queue state saved on mqd when the cp
* reassigns the queue, so when queue is switched on/off (e.g over
* subscription or quantum timeout) the context will be consistent
*/
m->cp_hqd_persistent_state =
DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
/* Although WinKFD writes this, I suspect it should not be necessary */
m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
QUANTUM_DURATION(10);
/*
* Pipe Priority
* Identifies the pipe relative priority when this queue is connected
* to the pipeline. The pipe priority is against the GFX pipe and HP3D.
* In KFD we are using a fixed pipe priority set to CS_MEDIUM.
* 0 = CS_LOW (typically below GFX)
* 1 = CS_MEDIUM (typically between HP3D and GFX)
* 2 = CS_HIGH (typically above HP3D)
*/
m->cp_hqd_pipe_priority = 1;
m->cp_hqd_queue_priority = 15;
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_iq_rptr = AQL_ENABLE;
*mqd = m;
if (gart_addr != NULL)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
return retval;
}
static int init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
int retval;
struct cik_sdma_rlc_registers *m;
BUG_ON(!mm || !mqd || !mqd_mem_obj);
retval = kfd_gtt_sa_allocate(mm->dev,
sizeof(struct cik_sdma_rlc_registers),
mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
m = (struct cik_sdma_rlc_registers *) (*mqd_mem_obj)->cpu_ptr;
memset(m, 0, sizeof(struct cik_sdma_rlc_registers));
*mqd = m;
if (gart_addr != NULL)
*gart_addr = (*mqd_mem_obj)->gpu_addr;
retval = mm->update_mqd(mm, m, q);
return retval;
}
static void uninit_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
BUG_ON(!mm || !mqd);
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr)
{
return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,
uint32_t __user *wptr)
{
return kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd);
}
static int update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
/*
* Calculate the queue size: the HW field holds log2 of the ring size in
* dwords, minus 1; ffs() is 1-based, hence the second -1.
*/
m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- 1 - 1;
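/* e.g. a 4 KiB ring is 1024 dwords: ffs(1024) = 11 and 11 - 1 - 1 = 9, which is log2(1024) - 1 */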
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
}
m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true;
}
return 0;
}
static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_sdma_rlc_registers *m;
BUG_ON(!mm || !mqd || !q);
m = get_sdma_mqd(mqd);
m->sdma_rlc_rb_cntl =
SDMA_RB_SIZE((ffs(q->queue_size / sizeof(unsigned int)))) |
SDMA_RB_VMID(q->vmid) |
SDMA_RPTR_WRITEBACK_ENABLE |
SDMA_RPTR_WRITEBACK_TIMER(6);
m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->sdma_rlc_doorbell = SDMA_OFFSET(q->doorbell_off) | SDMA_DB_ENABLE;
m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
m->sdma_engine_id = q->sdma_engine_id;
m->sdma_queue_id = q->sdma_queue_id;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
m->sdma_rlc_rb_cntl |= SDMA_RB_ENABLE;
q->is_active = true;
}
return 0;
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
pipe_id, queue_id);
}
/*
* preempt type here is ignored because there is only one way
* to preempt sdma queue
*/
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
uint64_t queue_address, uint32_t pipe_id,
uint32_t queue_id)
{
return kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd);
}
/*
* HIQ MQD Implementation, concrete implementation for HIQ MQD implementation.
* The HIQ queue in Kaveri is using the same MQD structure as all the user mode
* queues but with different initial values.
*/
static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
{
uint64_t addr;
struct cik_mqd *m;
int retval;
BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
pr_debug("kfd: In func %s\n", __func__);
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
mqd_mem_obj);
if (retval != 0)
return -ENOMEM;
m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
addr = (*mqd_mem_obj)->gpu_addr;
memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
m->header = 0xC0310800;
m->compute_pipelinestat_enable = 1;
m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
PRELOAD_REQ;
m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
QUANTUM_DURATION(10);
m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
m->cp_mqd_base_addr_hi = upper_32_bits(addr);
m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
/*
* Pipe Priority
* Identifies the pipe relative priority when this queue is connected
* to the pipeline. The pipe priority is against the GFX pipe and HP3D.
* In KFD we are using a fixed pipe priority set to CS_MEDIUM.
* 0 = CS_LOW (typically below GFX)
* 1 = CS_MEDIUM (typically between HP3D and GFX)
* 2 = CS_HIGH (typically above HP3D)
*/
m->cp_hqd_pipe_priority = 1;
m->cp_hqd_queue_priority = 15;
*mqd = m;
if (gart_addr)
*gart_addr = addr;
retval = mm->update_mqd(mm, m, q);
return retval;
}
static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct cik_mqd *m;
BUG_ON(!mm || !q || !mqd);
pr_debug("kfd: In func %s\n", __func__);
m = get_mqd(mqd);
m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
DEFAULT_MIN_AVAIL_SIZE |
PRIV_STATE |
KMD_QUEUE;
/*
* Calculate the queue size: the HW field holds log2 of the ring size in
* dwords, minus 1; ffs() is 1-based, hence the second -1.
*/
m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
- 1 - 1;
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
DOORBELL_OFFSET(q->doorbell_off);
m->cp_hqd_vmid = q->vmid;
m->cp_hqd_active = 0;
q->is_active = false;
if (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0) {
m->cp_hqd_active = 1;
q->is_active = true;
}
return 0;
}
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
struct cik_sdma_rlc_registers *m;
BUG_ON(!mqd);
m = (struct cik_sdma_rlc_registers *)mqd;
return m;
}
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
struct mqd_manager *mqd;
BUG_ON(!dev);
BUG_ON(type >= KFD_MQD_TYPE_MAX);
pr_debug("kfd: In func %s\n", __func__);
mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
if (!mqd)
return NULL;
mqd->dev = dev;
switch (type) {
case KFD_MQD_TYPE_CP:
case KFD_MQD_TYPE_COMPUTE:
mqd->init_mqd = init_mqd;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
break;
case KFD_MQD_TYPE_HIQ:
mqd->init_mqd = init_mqd_hiq;
mqd->uninit_mqd = uninit_mqd;
mqd->load_mqd = load_mqd;
mqd->update_mqd = update_mqd_hiq;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
break;
case KFD_MQD_TYPE_SDMA:
mqd->init_mqd = init_mqd_sdma;
mqd->uninit_mqd = uninit_mqd_sdma;
mqd->load_mqd = load_mqd_sdma;
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = destroy_mqd_sdma;
mqd->is_occupied = is_occupied_sdma;
break;
default:
kfree(mqd);
return NULL;
}
return mqd;
}


@@ -0,0 +1,33 @@
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/printk.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
{
pr_warn("amdkfd: VI MQD is not currently supported\n");
return NULL;
}


@@ -97,11 +97,8 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm,
pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
*rl_buffer_size,
PAGE_SIZE,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
(struct kgd_mem **) &pm->ib_buffer_obj);
retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
&pm->ib_buffer_obj);
if (retval != 0) {
pr_err("kfd: failed to allocate runlist IB\n");
@@ -351,7 +348,7 @@ int pm_send_set_resources(struct packet_manager *pm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&pm->lock);
pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
sizeof(*packet) / sizeof(uint32_t),
(unsigned int **)&packet);
if (packet == NULL) {
@@ -378,8 +375,7 @@ int pm_send_set_resources(struct packet_manager *pm,
packet->queue_mask_lo = lower_32_bits(res->queue_mask);
packet->queue_mask_hi = upper_32_bits(res->queue_mask);
pm->priv_queue->submit_packet(pm->priv_queue);
pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
@@ -405,7 +401,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
mutex_lock(&pm->lock);
retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval != 0)
goto fail_acquire_packet_buffer;
@@ -415,15 +411,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval != 0)
goto fail_create_runlist;
pm->priv_queue->submit_packet(pm->priv_queue);
pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
pm->priv_queue->rollback_packet(pm->priv_queue);
pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
@@ -441,7 +436,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
BUG_ON(!pm || !fence_address);
mutex_lock(&pm->lock);
retval = pm->priv_queue->acquire_packet_buffer(
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
sizeof(struct pm4_query_status) / sizeof(uint32_t),
(unsigned int **)&packet);
@@ -462,8 +457,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
packet->data_hi = upper_32_bits((uint64_t)fence_value);
packet->data_lo = lower_32_bits((uint64_t)fence_value);
pm->priv_queue->submit_packet(pm->priv_queue);
pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return 0;
@@ -485,7 +479,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
BUG_ON(!pm);
mutex_lock(&pm->lock);
retval = pm->priv_queue->acquire_packet_buffer(
retval = pm->priv_queue->ops.acquire_packet_buffer(
pm->priv_queue,
sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
&buffer);
@@ -540,8 +534,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
break;
};
pm->priv_queue->submit_packet(pm->priv_queue);
pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
pm->priv_queue->ops.submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return 0;
@@ -557,8 +550,7 @@ void pm_release_ib(struct packet_manager *pm)
mutex_lock(&pm->lock);
if (pm->allocated) {
kfd2kgd->free_mem(pm->dqm->dev->kgd,
(struct kgd_mem *) pm->ib_buffer_obj);
kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
pm->allocated = false;
}
mutex_unlock(&pm->lock);


@@ -103,12 +103,26 @@ enum cache_policy {
cache_policy_noncoherent
};
enum asic_family_type {
CHIP_KAVERI = 0,
CHIP_CARRIZO
};
struct kfd_device_info {
unsigned int asic_family;
unsigned int max_pasid_bits;
size_t ih_ring_entry_size;
uint8_t num_of_watch_points;
uint16_t mqd_size_aligned;
};
struct kfd_mem_obj {
uint32_t range_start;
uint32_t range_end;
uint64_t gpu_addr;
uint32_t *cpu_ptr;
};
struct kfd_dev {
struct kgd_dev *kgd;
@@ -134,6 +148,14 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
void *gtt_mem;
uint64_t gtt_start_gpu_addr;
void *gtt_start_cpu_ptr;
void *gtt_sa_bitmap;
struct mutex gtt_sa_lock;
unsigned int gtt_sa_chunk_size;
unsigned int gtt_sa_num_of_chunks;
/* QCM Device instance */
struct device_queue_manager *dqm;
@@ -149,12 +171,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd);
extern const struct kfd2kgd_calls *kfd2kgd;
struct kfd_mem_obj {
void *bo;
uint64_t gpu_addr;
uint32_t *cpu_ptr;
};
enum kfd_mempool {
KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
@@ -272,6 +288,15 @@ struct queue_properties {
bool is_active;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for SDMA queues */
uint32_t sdma_engine_id;
uint32_t sdma_queue_id;
uint32_t sdma_vm_addr;
/* Relevant only for VI */
uint64_t eop_ring_buffer_address;
uint32_t eop_ring_buffer_size;
uint64_t ctx_save_restore_area_address;
uint32_t ctx_save_restore_area_size;
};
/**
@@ -314,6 +339,8 @@ struct queue {
uint32_t pipe;
uint32_t queue;
unsigned int sdma_id;
struct kfd_process *process;
struct kfd_dev *device;
};
@@ -322,10 +349,10 @@ struct queue {
* Please read the kfd_mqd_manager.h description.
*/
enum KFD_MQD_TYPE {
KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
KFD_MQD_TYPE_HIQ, /* for hiq */
KFD_MQD_TYPE_CP, /* for cp queues and diq */
KFD_MQD_TYPE_SDMA, /* for sdma queues */
KFD_MQD_TYPE_MAX
};
@@ -477,8 +504,9 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p);
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p,
int create_pdd);
struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p);
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
@@ -506,6 +534,13 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int queue_id);
/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
struct kfd_mem_obj **mem_obj);
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
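These two entry points are the GTT sub-allocator that this series swaps in for the old kfd2kgd->allocate_mem()/free_mem() calls throughout. A typical-use sketch (the buffer's purpose is invented):

struct kfd_mem_obj *mem;

if (kfd_gtt_sa_allocate(kfd, sizeof(uint32_t), &mem) == 0) {
	*mem->cpu_ptr = 0;		/* CPU-side view of the chunk */
	/* hand mem->gpu_addr to the CP in a packet, wait, then... */
	kfd_gtt_sa_free(kfd, mem);
}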
extern struct device *kfd_device;
/* Topology */
@@ -530,6 +565,8 @@ int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
inline uint32_t lower_32(uint64_t x);
inline uint32_t upper_32(uint64_t x);
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
int init_queue(struct queue **q, struct queue_properties properties);
void uninit_queue(struct queue *q);
@@ -538,6 +575,10 @@ void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,


@@ -311,24 +311,29 @@ err_alloc_process:
}
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process *p,
int create_pdd)
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
list_for_each_entry(pdd, &p->per_device_data, per_device_list)
if (pdd->dev == dev)
return pdd;
break;
if (create_pdd) {
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
if (pdd != NULL) {
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
list_add(&pdd->per_device_list, &p->per_device_data);
}
return pdd;
}
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = NULL;
pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
if (pdd != NULL) {
pdd->dev = dev;
INIT_LIST_HEAD(&pdd->qpd.queues_list);
INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
pdd->qpd.dqm = dev->dqm;
list_add(&pdd->per_device_list, &p->per_device_data);
}
return pdd;
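This hunk splits the old create_pdd flag into two explicit entry points: kfd_get_process_device_data() is now a pure lookup, and kfd_create_process_device_data() performs the allocation. A minimal caller sketch; the combining wrapper is hypothetical and not part of the patch, and callers here treat a NULL result as "not present":

/* Hypothetical wrapper showing the lookup-then-create pattern the
 * split enables. */
static struct kfd_process_device *
kfd_lookup_or_create_pdd(struct kfd_dev *dev, struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	pdd = kfd_get_process_device_data(dev, p);	/* lookup only */
	if (!pdd)
		pdd = kfd_create_process_device_data(dev, p);
	return pdd;
}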
@@ -344,11 +349,14 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
struct kfd_process *p)
{
struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
struct kfd_process_device *pdd;
int err;
if (pdd == NULL)
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return ERR_PTR(-ENOMEM);
}
if (pdd->bound)
return pdd;
@@ -384,7 +392,7 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pqm_uninit(&p->pqm);
pdd = kfd_get_process_device_data(dev, p, 0);
pdd = kfd_get_process_device_data(dev, p);
/*
* Just mark pdd as unbound, because we still need it to call


@@ -128,7 +128,6 @@ static int create_cp_queue(struct process_queue_manager *pqm,
/* let DQM handle it */
q_properties->vmid = 0;
q_properties->queue_id = qid;
q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
retval = init_queue(q, *q_properties);
if (retval != 0)
@@ -167,8 +166,11 @@ int pqm_create_queue(struct process_queue_manager *pqm,
q = NULL;
kq = NULL;
pdd = kfd_get_process_device_data(dev, pqm->process, 1);
BUG_ON(!pdd);
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return -1;
}
retval = find_available_queue_slot(pqm, qid);
if (retval != 0)
@@ -176,7 +178,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (list_empty(&pqm->queues)) {
pdd->qpd.pqm = pqm;
dev->dqm->register_process(dev->dqm, &pdd->qpd);
dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
}
pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
@@ -186,6 +188,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
@@ -201,7 +204,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
@@ -215,7 +218,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
kq->queue->properties.queue_id = *qid;
pqn->kq = kq;
pqn->q = NULL;
retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
kq, &pdd->qpd);
break;
default:
BUG();
@@ -245,7 +249,7 @@ err_allocate_pqn:
/* check if queues list is empty unregister process from device */
clear_bit(*qid, pqm->queue_slot_bitmap);
if (list_empty(&pqm->queues))
dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
@@ -277,19 +281,22 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
dev = pqn->q->device;
BUG_ON(!dev);
pdd = kfd_get_process_device_data(dev, pqm->process, 1);
BUG_ON(!pdd);
pdd = kfd_get_process_device_data(dev, pqm->process);
if (!pdd) {
pr_err("Process device data doesn't exist\n");
return -1;
}
if (pqn->kq) {
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
kernel_queue_uninit(pqn->kq);
}
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval != 0)
return retval;
@@ -301,7 +308,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
clear_bit(qid, pqm->queue_slot_bitmap);
if (list_empty(&pqm->queues))
dqm->unregister_process(dqm, &pdd->qpd);
dqm->ops.unregister_process(dqm, &pdd->qpd);
return retval;
}
@@ -326,7 +333,8 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
pqn->q->properties.queue_percent = p->queue_percent;
pqn->q->properties.priority = p->priority;
retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
pqn->q);
if (retval != 0)
return retval;
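Throughout this file the series replaces direct calls such as dqm->create_queue() with dqm->ops.create_queue(). A reduced sketch of the ops-table shape those call sites imply; parameter types are inferred from the arguments passed above, not copied from the kernel header:

/* Reduced sketch of the DQM ops table; the real structure carries
 * additional callbacks and state. */
struct device_queue_manager;
struct qcm_process_device;
struct queue;

struct device_queue_manager_ops {
	int (*create_queue)(struct device_queue_manager *dqm, struct queue *q,
			    struct qcm_process_device *qpd,
			    unsigned int *allocated_vmid);
	int (*destroy_queue)(struct device_queue_manager *dqm,
			     struct qcm_process_device *qpd, struct queue *q);
	int (*update_queue)(struct device_queue_manager *dqm, struct queue *q);
	int (*register_process)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd);
	int (*unregister_process)(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd);
};

struct device_queue_manager {
	struct device_queue_manager_ops ops;	/* was: bare function pointers */
	/* ... scheduling state ... */
};

Grouping the callbacks behind one named ops struct makes it easier to swap whole scheduling implementations per device, which appears to be the point of this rework.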


@@ -27,6 +27,7 @@
#include <linux/acpi.h>
#include <linux/hash.h>
#include <linux/cpufreq.h>
#include <linux/log2.h>
#include "kfd_priv.h"
#include "kfd_crat.h"
@@ -630,10 +631,10 @@ static struct kobj_type cache_type = {
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
ssize_t ret;
struct kfd_topology_device *dev;
char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
uint32_t i;
uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
buffer[0] = 0;
@@ -641,8 +642,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
} else if (strcmp(attr->name, "name") == 0) {
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
@@ -652,80 +655,90 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
break;
}
public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
ret = sysfs_show_str_val(buffer, public_name);
} else {
dev = container_of(attr, struct kfd_topology_device,
attr_props);
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
dev->mem_bank_count);
} else {
sysfs_show_32bit_prop(buffer, "mem_banks_count",
dev->node_props.mem_banks_count);
}
sysfs_show_32bit_prop(buffer, "caches_count",
dev->node_props.caches_count);
sysfs_show_32bit_prop(buffer, "io_links_count",
dev->node_props.io_links_count);
sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
dev->node_props.cpu_core_id_base);
sysfs_show_32bit_prop(buffer, "simd_id_base",
dev->node_props.simd_id_base);
sysfs_show_32bit_prop(buffer, "capability",
dev->node_props.capability);
sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
dev->node_props.max_waves_per_simd);
sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
dev->node_props.lds_size_in_kb);
sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
dev->node_props.gds_size_in_kb);
sysfs_show_32bit_prop(buffer, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, "array_count",
dev->node_props.array_count);
sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
dev->node_props.simd_arrays_per_engine);
sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
dev->node_props.cu_per_simd_array);
sysfs_show_32bit_prop(buffer, "simd_per_cu",
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
if (dev->gpu) {
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version",
kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
cpufreq_quick_get_max(0)/1000);
return sysfs_show_str_val(buffer, public_name);
}
return ret;
dev = container_of(attr, struct kfd_topology_device,
attr_props);
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
dev->node_props.simd_count);
if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
dev->node_props.mem_banks_count,
dev->mem_bank_count);
sysfs_show_32bit_prop(buffer, "mem_banks_count",
dev->mem_bank_count);
} else {
sysfs_show_32bit_prop(buffer, "mem_banks_count",
dev->node_props.mem_banks_count);
}
sysfs_show_32bit_prop(buffer, "caches_count",
dev->node_props.caches_count);
sysfs_show_32bit_prop(buffer, "io_links_count",
dev->node_props.io_links_count);
sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
dev->node_props.cpu_core_id_base);
sysfs_show_32bit_prop(buffer, "simd_id_base",
dev->node_props.simd_id_base);
sysfs_show_32bit_prop(buffer, "capability",
dev->node_props.capability);
sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
dev->node_props.max_waves_per_simd);
sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
dev->node_props.lds_size_in_kb);
sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
dev->node_props.gds_size_in_kb);
sysfs_show_32bit_prop(buffer, "wave_front_size",
dev->node_props.wave_front_size);
sysfs_show_32bit_prop(buffer, "array_count",
dev->node_props.array_count);
sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
dev->node_props.simd_arrays_per_engine);
sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
dev->node_props.cu_per_simd_array);
sysfs_show_32bit_prop(buffer, "simd_per_cu",
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
if (dev->gpu) {
log_max_watch_addr =
__ilog2_u32(dev->gpu->device_info->num_of_watch_points);
if (log_max_watch_addr) {
dev->node_props.capability |=
HSA_CAP_WATCH_POINTS_SUPPORTED;
dev->node_props.capability |=
((log_max_watch_addr <<
HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
HSA_CAP_WATCH_POINTS_TOTALBITS_MASK);
}
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
sysfs_show_32bit_prop(buffer, "fw_version",
kfd2kgd->get_fw_version(
dev->gpu->kgd,
KGD_ENGINE_MEC1));
}
return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
cpufreq_quick_get_max(0)/1000);
}
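The new dev->gpu branch above folds the number of hardware watch points into the capability word before it reaches sysfs. A standalone reduction of that bit packing; the HSA_CAP_* values below are stand-ins for illustration, not the real header constants:

#include <stdint.h>
#include <stdio.h>

#define HSA_CAP_WATCH_POINTS_SUPPORTED       (1u << 0)   /* stand-in */
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 1           /* stand-in */
#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK  (0xfu << 1) /* stand-in */

static uint32_t ilog2_u32(uint32_t v)	/* same result as __ilog2_u32() */
{
	uint32_t r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	uint32_t capability = 0;
	uint32_t num_of_watch_points = 4;	/* example device value */
	uint32_t log_max = ilog2_u32(num_of_watch_points);	/* 2 */

	if (log_max) {
		capability |= HSA_CAP_WATCH_POINTS_SUPPORTED;
		capability |= (log_max << HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT) &
			      HSA_CAP_WATCH_POINTS_TOTALBITS_MASK;
	}
	printf("capability = 0x%x\n", capability);	/* prints 0x5 */
	return 0;
}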
static const struct sysfs_ops node_ops = {


@@ -0,0 +1,293 @@
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef CIK_STRUCTS_H_
#define CIK_STRUCTS_H_
struct cik_mqd {
uint32_t header;
uint32_t compute_dispatch_initiator;
uint32_t compute_dim_x;
uint32_t compute_dim_y;
uint32_t compute_dim_z;
uint32_t compute_start_x;
uint32_t compute_start_y;
uint32_t compute_start_z;
uint32_t compute_num_thread_x;
uint32_t compute_num_thread_y;
uint32_t compute_num_thread_z;
uint32_t compute_pipelinestat_enable;
uint32_t compute_perfcount_enable;
uint32_t compute_pgm_lo;
uint32_t compute_pgm_hi;
uint32_t compute_tba_lo;
uint32_t compute_tba_hi;
uint32_t compute_tma_lo;
uint32_t compute_tma_hi;
uint32_t compute_pgm_rsrc1;
uint32_t compute_pgm_rsrc2;
uint32_t compute_vmid;
uint32_t compute_resource_limits;
uint32_t compute_static_thread_mgmt_se0;
uint32_t compute_static_thread_mgmt_se1;
uint32_t compute_tmpring_size;
uint32_t compute_static_thread_mgmt_se2;
uint32_t compute_static_thread_mgmt_se3;
uint32_t compute_restart_x;
uint32_t compute_restart_y;
uint32_t compute_restart_z;
uint32_t compute_thread_trace_enable;
uint32_t compute_misc_reserved;
uint32_t compute_user_data_0;
uint32_t compute_user_data_1;
uint32_t compute_user_data_2;
uint32_t compute_user_data_3;
uint32_t compute_user_data_4;
uint32_t compute_user_data_5;
uint32_t compute_user_data_6;
uint32_t compute_user_data_7;
uint32_t compute_user_data_8;
uint32_t compute_user_data_9;
uint32_t compute_user_data_10;
uint32_t compute_user_data_11;
uint32_t compute_user_data_12;
uint32_t compute_user_data_13;
uint32_t compute_user_data_14;
uint32_t compute_user_data_15;
uint32_t cp_compute_csinvoc_count_lo;
uint32_t cp_compute_csinvoc_count_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
uint32_t cp_hqd_vmid;
uint32_t cp_hqd_persistent_state;
uint32_t cp_hqd_pipe_priority;
uint32_t cp_hqd_queue_priority;
uint32_t cp_hqd_quantum;
uint32_t cp_hqd_pq_base_lo;
uint32_t cp_hqd_pq_base_hi;
uint32_t cp_hqd_pq_rptr;
uint32_t cp_hqd_pq_rptr_report_addr_lo;
uint32_t cp_hqd_pq_rptr_report_addr_hi;
uint32_t cp_hqd_pq_wptr_poll_addr_lo;
uint32_t cp_hqd_pq_wptr_poll_addr_hi;
uint32_t cp_hqd_pq_doorbell_control;
uint32_t cp_hqd_pq_wptr;
uint32_t cp_hqd_pq_control;
uint32_t cp_hqd_ib_base_addr_lo;
uint32_t cp_hqd_ib_base_addr_hi;
uint32_t cp_hqd_ib_rptr;
uint32_t cp_hqd_ib_control;
uint32_t cp_hqd_iq_timer;
uint32_t cp_hqd_iq_rptr;
uint32_t cp_hqd_dequeue_request;
uint32_t cp_hqd_dma_offload;
uint32_t cp_hqd_sema_cmd;
uint32_t cp_hqd_msg_type;
uint32_t cp_hqd_atomic0_preop_lo;
uint32_t cp_hqd_atomic0_preop_hi;
uint32_t cp_hqd_atomic1_preop_lo;
uint32_t cp_hqd_atomic1_preop_hi;
uint32_t cp_hqd_hq_status0;
uint32_t cp_hqd_hq_control0;
uint32_t cp_mqd_control;
uint32_t cp_mqd_query_time_lo;
uint32_t cp_mqd_query_time_hi;
uint32_t cp_mqd_connect_start_time_lo;
uint32_t cp_mqd_connect_start_time_hi;
uint32_t cp_mqd_connect_end_time_lo;
uint32_t cp_mqd_connect_end_time_hi;
uint32_t cp_mqd_connect_end_wf_count;
uint32_t cp_mqd_connect_end_pq_rptr;
uint32_t cp_mqd_connect_end_pq_wptr;
uint32_t cp_mqd_connect_end_ib_rptr;
uint32_t reserved_96;
uint32_t reserved_97;
uint32_t reserved_98;
uint32_t reserved_99;
uint32_t iqtimer_pkt_header;
uint32_t iqtimer_pkt_dw0;
uint32_t iqtimer_pkt_dw1;
uint32_t iqtimer_pkt_dw2;
uint32_t iqtimer_pkt_dw3;
uint32_t iqtimer_pkt_dw4;
uint32_t iqtimer_pkt_dw5;
uint32_t iqtimer_pkt_dw6;
uint32_t reserved_108;
uint32_t reserved_109;
uint32_t reserved_110;
uint32_t reserved_111;
uint32_t queue_doorbell_id0;
uint32_t queue_doorbell_id1;
uint32_t queue_doorbell_id2;
uint32_t queue_doorbell_id3;
uint32_t queue_doorbell_id4;
uint32_t queue_doorbell_id5;
uint32_t queue_doorbell_id6;
uint32_t queue_doorbell_id7;
uint32_t queue_doorbell_id8;
uint32_t queue_doorbell_id9;
uint32_t queue_doorbell_id10;
uint32_t queue_doorbell_id11;
uint32_t queue_doorbell_id12;
uint32_t queue_doorbell_id13;
uint32_t queue_doorbell_id14;
uint32_t queue_doorbell_id15;
};
struct cik_sdma_rlc_registers {
uint32_t sdma_rlc_rb_cntl;
uint32_t sdma_rlc_rb_base;
uint32_t sdma_rlc_rb_base_hi;
uint32_t sdma_rlc_rb_rptr;
uint32_t sdma_rlc_rb_wptr;
uint32_t sdma_rlc_rb_wptr_poll_cntl;
uint32_t sdma_rlc_rb_wptr_poll_addr_hi;
uint32_t sdma_rlc_rb_wptr_poll_addr_lo;
uint32_t sdma_rlc_rb_rptr_addr_hi;
uint32_t sdma_rlc_rb_rptr_addr_lo;
uint32_t sdma_rlc_ib_cntl;
uint32_t sdma_rlc_ib_rptr;
uint32_t sdma_rlc_ib_offset;
uint32_t sdma_rlc_ib_base_lo;
uint32_t sdma_rlc_ib_base_hi;
uint32_t sdma_rlc_ib_size;
uint32_t sdma_rlc_skip_cntl;
uint32_t sdma_rlc_context_status;
uint32_t sdma_rlc_doorbell;
uint32_t sdma_rlc_virtual_addr;
uint32_t sdma_rlc_ape1_cntl;
uint32_t sdma_rlc_doorbell_log;
uint32_t reserved_22;
uint32_t reserved_23;
uint32_t reserved_24;
uint32_t reserved_25;
uint32_t reserved_26;
uint32_t reserved_27;
uint32_t reserved_28;
uint32_t reserved_29;
uint32_t reserved_30;
uint32_t reserved_31;
uint32_t reserved_32;
uint32_t reserved_33;
uint32_t reserved_34;
uint32_t reserved_35;
uint32_t reserved_36;
uint32_t reserved_37;
uint32_t reserved_38;
uint32_t reserved_39;
uint32_t reserved_40;
uint32_t reserved_41;
uint32_t reserved_42;
uint32_t reserved_43;
uint32_t reserved_44;
uint32_t reserved_45;
uint32_t reserved_46;
uint32_t reserved_47;
uint32_t reserved_48;
uint32_t reserved_49;
uint32_t reserved_50;
uint32_t reserved_51;
uint32_t reserved_52;
uint32_t reserved_53;
uint32_t reserved_54;
uint32_t reserved_55;
uint32_t reserved_56;
uint32_t reserved_57;
uint32_t reserved_58;
uint32_t reserved_59;
uint32_t reserved_60;
uint32_t reserved_61;
uint32_t reserved_62;
uint32_t reserved_63;
uint32_t reserved_64;
uint32_t reserved_65;
uint32_t reserved_66;
uint32_t reserved_67;
uint32_t reserved_68;
uint32_t reserved_69;
uint32_t reserved_70;
uint32_t reserved_71;
uint32_t reserved_72;
uint32_t reserved_73;
uint32_t reserved_74;
uint32_t reserved_75;
uint32_t reserved_76;
uint32_t reserved_77;
uint32_t reserved_78;
uint32_t reserved_79;
uint32_t reserved_80;
uint32_t reserved_81;
uint32_t reserved_82;
uint32_t reserved_83;
uint32_t reserved_84;
uint32_t reserved_85;
uint32_t reserved_86;
uint32_t reserved_87;
uint32_t reserved_88;
uint32_t reserved_89;
uint32_t reserved_90;
uint32_t reserved_91;
uint32_t reserved_92;
uint32_t reserved_93;
uint32_t reserved_94;
uint32_t reserved_95;
uint32_t reserved_96;
uint32_t reserved_97;
uint32_t reserved_98;
uint32_t reserved_99;
uint32_t reserved_100;
uint32_t reserved_101;
uint32_t reserved_102;
uint32_t reserved_103;
uint32_t reserved_104;
uint32_t reserved_105;
uint32_t reserved_106;
uint32_t reserved_107;
uint32_t reserved_108;
uint32_t reserved_109;
uint32_t reserved_110;
uint32_t reserved_111;
uint32_t reserved_112;
uint32_t reserved_113;
uint32_t reserved_114;
uint32_t reserved_115;
uint32_t reserved_116;
uint32_t reserved_117;
uint32_t reserved_118;
uint32_t reserved_119;
uint32_t reserved_120;
uint32_t reserved_121;
uint32_t reserved_122;
uint32_t reserved_123;
uint32_t reserved_124;
uint32_t reserved_125;
uint32_t reserved_126;
uint32_t reserved_127;
uint32_t sdma_engine_id;
uint32_t sdma_queue_id;
};
#endif /* CIK_STRUCTS_H_ */


@@ -110,17 +110,10 @@ struct kgd2kfd_calls {
/**
* struct kfd2kgd_calls
*
* @init_sa_manager: Initialize an instance of the sa manager, used by
* amdkfd for all system memory allocations that are mapped to the GART
* address space
* @init_gtt_mem_allocation: Allocate a buffer on the gart aperture.
* The buffer can be used for mqds, hpds, kernel queue, fence and runlists
*
* @fini_sa_manager: Releases all memory allocations for amdkfd that are
* handled by kgd sa manager
*
* @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
* be used for mqds, hpds, kernel queue, fence and runlists
*
* @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
* @free_gtt_mem: Frees a buffer that was allocated on the gart aperture
*
* @get_vmem_size: Retrieves (physical) size of VRAM
*
@@ -136,18 +129,23 @@ struct kgd2kfd_calls {
* @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
* scheduling mode. Only used for no cp scheduling mode.
*
* @init_memory: Initializes memory apertures to fixed base/limit address
* and non cached memory types.
*
* @init_pipeline: Initialized the compute pipelines.
*
* @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
* scheduling mode.
*
* @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
* used only for no HWS mode.
*
* @hqd_is_occupied: Checks if a hqd slot is occupied.
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
* @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
*
* @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
* SDMA hqd slot.
*
* @get_fw_version: Returns FW versions from the header
*
* This structure contains function pointers to services that the kgd driver
@@ -155,13 +153,11 @@ struct kgd2kfd_calls {
*
*/
struct kfd2kgd_calls {
/* Memory management. */
int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
void (*fini_sa_manager)(struct kgd_dev *kgd);
int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
enum kgd_memory_pool pool, struct kgd_mem **mem);
int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr);
void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
@@ -176,25 +172,32 @@ struct kfd2kgd_calls {
int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
unsigned int vmid);
int (*init_memory)(struct kgd_dev *kgd);
int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr);
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd);
bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);
int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
unsigned int timeout);
uint16_t (*get_fw_version)(struct kgd_dev *kgd,
enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
const struct kgd2kfd_calls **g2f);
#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
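The header ends with the single registration entry point. A hedged sketch of the handshake it implies: the GPU driver (kgd) hands in its kfd2kgd_calls table and receives kfd's kgd2kfd_calls table back. KFD_INTERFACE_VERSION is assumed to be defined in this same header, and the callback body is a placeholder:

/* Sketch of a kgd-side registration; only one callback is shown. */
static uint64_t my_get_vmem_size(struct kgd_dev *kgd)
{
	return 0;	/* placeholder: report VRAM size here */
}

static const struct kfd2kgd_calls my_f2g = {
	.get_vmem_size = my_get_vmem_size,
	/* .init_gtt_mem_allocation, .hqd_load, ... */
};

static const struct kgd2kfd_calls *my_g2f;

static bool my_register_with_kfd(void)
{
	return kgd2kfd_init(KFD_INTERFACE_VERSION, &my_f2g, &my_g2f);
}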


@@ -653,10 +653,6 @@ static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
return 0;
}
static void armada_drm_crtc_load_lut(struct drm_crtc *crtc)
{
}
/* The mode_config.mutex will be held for this call */
static void armada_drm_crtc_disable(struct drm_crtc *crtc)
{
@@ -678,7 +674,6 @@ static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
.mode_fixup = armada_drm_crtc_mode_fixup,
.mode_set = armada_drm_crtc_mode_set,
.mode_set_base = armada_drm_crtc_mode_set_base,
.load_lut = armada_drm_crtc_load_lut,
.disable = armada_drm_crtc_disable,
};


@@ -335,18 +335,27 @@ int ast_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, &afbdev->helper,
1, 1);
if (ret) {
kfree(afbdev);
return ret;
}
if (ret)
goto free;
drm_fb_helper_single_add_all_connectors(&afbdev->helper);
ret = drm_fb_helper_single_add_all_connectors(&afbdev->helper);
if (ret)
goto fini;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
drm_fb_helper_initial_config(&afbdev->helper, 32);
ret = drm_fb_helper_initial_config(&afbdev->helper, 32);
if (ret)
goto fini;
return 0;
fini:
drm_fb_helper_fini(&afbdev->helper);
free:
kfree(afbdev);
return ret;
}
void ast_fbdev_fini(struct drm_device *dev)


@@ -0,0 +1,11 @@
config DRM_ATMEL_HLCDC
tristate "DRM Support for ATMEL HLCDC Display Controller"
depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
help
Choose this option if you have an ATMEL SoC with an HLCDC display
controller (i.e. at91sam9n12, at91sam9x5 family or sama5d3 family).


@@ -0,0 +1,7 @@
atmel-hlcdc-dc-y := atmel_hlcdc_crtc.o \
atmel_hlcdc_dc.o \
atmel_hlcdc_layer.o \
atmel_hlcdc_output.o \
atmel_hlcdc_plane.o
obj-$(CONFIG_DRM_ATMEL_HLCDC) += atmel-hlcdc-dc.o


@@ -0,0 +1,406 @@
/*
* Copyright (C) 2014 Traphandler
* Copyright (C) 2014 Free Electrons
*
* Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drmP.h>
#include <video/videomode.h>
#include "atmel_hlcdc_dc.h"
/**
* Atmel HLCDC CRTC structure
*
* @base: base DRM CRTC structure
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @event: pointer to the current page flip event
* @id: CRTC id (returned by drm_crtc_index)
* @dpms: DPMS mode
*/
struct atmel_hlcdc_crtc {
struct drm_crtc base;
struct atmel_hlcdc_dc *dc;
struct drm_pending_vblank_event *event;
int id;
int dpms;
};
static inline struct atmel_hlcdc_crtc *
drm_crtc_to_atmel_hlcdc_crtc(struct drm_crtc *crtc)
{
return container_of(crtc, struct atmel_hlcdc_crtc, base);
}
static void atmel_hlcdc_crtc_dpms(struct drm_crtc *c, int mode)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
unsigned int status;
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (crtc->dpms == mode)
return;
pm_runtime_get_sync(dev->dev);
if (mode != DRM_MODE_DPMS_ON) {
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_DISP);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_DISP))
cpu_relax();
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_SYNC);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_SYNC))
cpu_relax();
regmap_write(regmap, ATMEL_HLCDC_DIS, ATMEL_HLCDC_PIXEL_CLK);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
(status & ATMEL_HLCDC_PIXEL_CLK))
cpu_relax();
clk_disable_unprepare(crtc->dc->hlcdc->sys_clk);
pm_runtime_allow(dev->dev);
} else {
pm_runtime_forbid(dev->dev);
clk_prepare_enable(crtc->dc->hlcdc->sys_clk);
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_PIXEL_CLK);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_PIXEL_CLK))
cpu_relax();
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_SYNC);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_SYNC))
cpu_relax();
regmap_write(regmap, ATMEL_HLCDC_EN, ATMEL_HLCDC_DISP);
while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
!(status & ATMEL_HLCDC_DISP))
cpu_relax();
}
pm_runtime_put_sync(dev->dev);
crtc->dpms = mode;
}
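Each step of the DPMS path above writes one bit to the EN or DIS register and then busy-waits until the matching status bit in ATMEL_HLCDC_SR reflects it. A hypothetical helper capturing that repeated write-then-poll idiom; it is not part of the patch:

/* Write 'bit' to 'reg' (ATMEL_HLCDC_EN or ATMEL_HLCDC_DIS) and spin
 * until the status register agrees with the requested state. */
static void hlcdc_set_and_wait(struct regmap *regmap, u32 reg,
			       u32 bit, bool enable)
{
	unsigned int status;

	regmap_write(regmap, reg, bit);
	while (!regmap_read(regmap, ATMEL_HLCDC_SR, &status) &&
	       ((status & bit) ? !enable : enable))
		cpu_relax();
}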
static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c,
struct drm_display_mode *mode,
struct drm_display_mode *adj,
int x, int y,
struct drm_framebuffer *old_fb)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct regmap *regmap = crtc->dc->hlcdc->regmap;
struct drm_plane *plane = c->primary;
struct drm_framebuffer *fb;
unsigned long mode_rate;
struct videomode vm;
unsigned long prate;
unsigned int cfg;
int div;
if (atmel_hlcdc_dc_mode_valid(crtc->dc, adj) != MODE_OK)
return -EINVAL;
vm.vfront_porch = adj->crtc_vsync_start - adj->crtc_vdisplay;
vm.vback_porch = adj->crtc_vtotal - adj->crtc_vsync_end;
vm.vsync_len = adj->crtc_vsync_end - adj->crtc_vsync_start;
vm.hfront_porch = adj->crtc_hsync_start - adj->crtc_hdisplay;
vm.hback_porch = adj->crtc_htotal - adj->crtc_hsync_end;
vm.hsync_len = adj->crtc_hsync_end - adj->crtc_hsync_start;
regmap_write(regmap, ATMEL_HLCDC_CFG(1),
(vm.hsync_len - 1) | ((vm.vsync_len - 1) << 16));
regmap_write(regmap, ATMEL_HLCDC_CFG(2),
(vm.vfront_porch - 1) | (vm.vback_porch << 16));
regmap_write(regmap, ATMEL_HLCDC_CFG(3),
(vm.hfront_porch - 1) | ((vm.hback_porch - 1) << 16));
regmap_write(regmap, ATMEL_HLCDC_CFG(4),
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
cfg = ATMEL_HLCDC_CLKPOL;
prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = mode->crtc_clock * 1000;
if ((prate / 2) < mode_rate) {
prate *= 2;
cfg |= ATMEL_HLCDC_CLKSEL;
}
div = DIV_ROUND_UP(prate, mode_rate);
if (div < 2)
div = 2;
cfg |= ATMEL_HLCDC_CLKDIV(div);
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(0),
ATMEL_HLCDC_CLKSEL | ATMEL_HLCDC_CLKDIV_MASK |
ATMEL_HLCDC_CLKPOL, cfg);
cfg = 0;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
cfg |= ATMEL_HLCDC_VSPOL;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
cfg |= ATMEL_HLCDC_HSPOL;
regmap_update_bits(regmap, ATMEL_HLCDC_CFG(5),
ATMEL_HLCDC_HSPOL | ATMEL_HLCDC_VSPOL |
ATMEL_HLCDC_VSPDLYS | ATMEL_HLCDC_VSPDLYE |
ATMEL_HLCDC_DISPPOL | ATMEL_HLCDC_DISPDLY |
ATMEL_HLCDC_VSPSU | ATMEL_HLCDC_VSPHO |
ATMEL_HLCDC_GUARDTIME_MASK,
cfg);
fb = plane->fb;
plane->fb = old_fb;
return atmel_hlcdc_plane_update_with_mode(plane, c, fb, 0, 0,
adj->hdisplay, adj->vdisplay,
x << 16, y << 16,
adj->hdisplay << 16,
adj->vdisplay << 16,
adj);
}
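The divider selection above doubles the source rate (and sets CLKSEL) only when half the system clock cannot reach the requested pixel clock. A worked reduction with illustrative rates, a 132 MHz system clock and a 33 MHz pixel clock; the numbers are assumptions, not from the patch:

/* Reduced CLKSEL/CLKDIV arithmetic; returns the divider. */
static int example_clkdiv(void)
{
	unsigned long prate = 132000000UL;	  /* clk_get_rate(sys_clk) */
	unsigned long mode_rate = 33000UL * 1000; /* mode->crtc_clock kHz */
	int div;

	if ((prate / 2) < mode_rate)	/* 66 MHz >= 33 MHz: not taken;   */
		prate *= 2;		/* would double prate, set CLKSEL */

	div = (prate + mode_rate - 1) / mode_rate;	/* DIV_ROUND_UP = 4 */
	if (div < 2)
		div = 2;

	return div;	/* pixel clock = 132 MHz / 4 = 33 MHz exactly */
}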
int atmel_hlcdc_crtc_mode_set_base(struct drm_crtc *c, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_plane *plane = c->primary;
struct drm_framebuffer *fb = plane->fb;
struct drm_display_mode *mode = &c->hwmode;
plane->fb = old_fb;
return plane->funcs->update_plane(plane, c, fb,
0, 0,
mode->hdisplay,
mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16,
mode->vdisplay << 16);
}
static void atmel_hlcdc_crtc_prepare(struct drm_crtc *crtc)
{
atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atmel_hlcdc_crtc_commit(struct drm_crtc *crtc)
{
atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static bool atmel_hlcdc_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
static void atmel_hlcdc_crtc_disable(struct drm_crtc *crtc)
{
struct drm_plane *plane;
atmel_hlcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
crtc->primary->funcs->disable_plane(crtc->primary);
drm_for_each_legacy_plane(plane, &crtc->dev->mode_config.plane_list) {
if (plane->crtc != crtc)
continue;
plane->funcs->disable_plane(crtc->primary);
plane->crtc = NULL;
}
}
static const struct drm_crtc_helper_funcs lcdc_crtc_helper_funcs = {
.mode_fixup = atmel_hlcdc_crtc_mode_fixup,
.dpms = atmel_hlcdc_crtc_dpms,
.mode_set = atmel_hlcdc_crtc_mode_set,
.mode_set_base = atmel_hlcdc_crtc_mode_set_base,
.prepare = atmel_hlcdc_crtc_prepare,
.commit = atmel_hlcdc_crtc_commit,
.disable = atmel_hlcdc_crtc_disable,
};
static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
drm_crtc_cleanup(c);
kfree(crtc);
}
void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c,
struct drm_file *file)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct drm_pending_vblank_event *event;
struct drm_device *dev = c->dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
event = crtc->event;
if (event && event->base.file_priv == file) {
event->base.destroy(&event->base);
drm_vblank_put(dev, crtc->id);
crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (crtc->event) {
drm_send_vblank_event(dev, crtc->id, crtc->event);
drm_vblank_put(dev, crtc->id);
crtc->event = NULL;
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
{
drm_handle_vblank(c->dev, 0);
atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
}
static int atmel_hlcdc_crtc_page_flip(struct drm_crtc *c,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t page_flip_flags)
{
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
struct atmel_hlcdc_plane_update_req req;
struct drm_plane *plane = c->primary;
struct drm_device *dev = c->dev;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&dev->event_lock, flags);
if (crtc->event)
ret = -EBUSY;
spin_unlock_irqrestore(&dev->event_lock, flags);
if (ret)
return ret;
memset(&req, 0, sizeof(req));
req.crtc_x = 0;
req.crtc_y = 0;
req.crtc_h = c->mode.crtc_vdisplay;
req.crtc_w = c->mode.crtc_hdisplay;
req.src_x = c->x << 16;
req.src_y = c->y << 16;
req.src_w = req.crtc_w << 16;
req.src_h = req.crtc_h << 16;
req.fb = fb;
ret = atmel_hlcdc_plane_prepare_update_req(plane, &req, &c->hwmode);
if (ret)
return ret;
if (event) {
drm_vblank_get(c->dev, crtc->id);
spin_lock_irqsave(&dev->event_lock, flags);
crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
ret = atmel_hlcdc_plane_apply_update_req(plane, &req);
if (ret)
crtc->event = NULL;
else
plane->fb = fb;
return ret;
}
static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = {
.page_flip = atmel_hlcdc_crtc_page_flip,
.set_config = drm_crtc_helper_set_config,
.destroy = atmel_hlcdc_crtc_destroy,
};
int atmel_hlcdc_crtc_create(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_planes *planes = dc->planes;
struct atmel_hlcdc_crtc *crtc;
int ret;
int i;
crtc = kzalloc(sizeof(*crtc), GFP_KERNEL);
if (!crtc)
return -ENOMEM;
crtc->dpms = DRM_MODE_DPMS_OFF;
crtc->dc = dc;
ret = drm_crtc_init_with_planes(dev, &crtc->base,
&planes->primary->base,
planes->cursor ? &planes->cursor->base : NULL,
&atmel_hlcdc_crtc_funcs);
if (ret < 0)
goto fail;
crtc->id = drm_crtc_index(&crtc->base);
if (planes->cursor)
planes->cursor->base.possible_crtcs = 1 << crtc->id;
for (i = 0; i < planes->noverlays; i++)
planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
dc->crtc = &crtc->base;
return 0;
fail:
atmel_hlcdc_crtc_destroy(&crtc->base);
return ret;
}


@@ -0,0 +1,579 @@
/*
* Copyright (C) 2014 Traphandler
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include "atmel_hlcdc_dc.h"
#define ATMEL_HLCDC_LAYER_IRQS_OFFSET 8
static const struct atmel_hlcdc_layer_desc atmel_hlcdc_sama5d3_layers[] = {
{
.name = "base",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x40,
.id = 0,
.type = ATMEL_HLCDC_BASE_LAYER,
.nconfigs = 7,
.layout = {
.xstride = { 2 },
.default_color = 3,
.general_config = 4,
.disc_pos = 5,
.disc_size = 6,
},
},
{
.name = "overlay1",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x140,
.id = 1,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.nconfigs = 10,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
},
{
.name = "overlay2",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x240,
.id = 2,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.nconfigs = 10,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
},
{
.name = "high-end-overlay",
.formats = &atmel_hlcdc_plane_rgb_and_yuv_formats,
.regs_offset = 0x340,
.id = 3,
.type = ATMEL_HLCDC_OVERLAY_LAYER,
.nconfigs = 42,
.layout = {
.pos = 2,
.size = 3,
.memsize = 4,
.xstride = { 5, 7 },
.pstride = { 6, 8 },
.default_color = 9,
.chroma_key = 10,
.chroma_key_mask = 11,
.general_config = 12,
.csc = 14,
},
},
{
.name = "cursor",
.formats = &atmel_hlcdc_plane_rgb_formats,
.regs_offset = 0x440,
.id = 4,
.type = ATMEL_HLCDC_CURSOR_LAYER,
.nconfigs = 10,
.max_width = 128,
.max_height = 128,
.layout = {
.pos = 2,
.size = 3,
.xstride = { 4 },
.pstride = { 5 },
.default_color = 6,
.chroma_key = 7,
.chroma_key_mask = 8,
.general_config = 9,
},
},
};
static const struct atmel_hlcdc_dc_desc atmel_hlcdc_dc_sama5d3 = {
.min_width = 0,
.min_height = 0,
.max_width = 2048,
.max_height = 2048,
.nlayers = ARRAY_SIZE(atmel_hlcdc_sama5d3_layers),
.layers = atmel_hlcdc_sama5d3_layers,
};
static const struct of_device_id atmel_hlcdc_of_match[] = {
{
.compatible = "atmel,sama5d3-hlcdc",
.data = &atmel_hlcdc_dc_sama5d3,
},
{ /* sentinel */ },
};
int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
struct drm_display_mode *mode)
{
int vfront_porch = mode->vsync_start - mode->vdisplay;
int vback_porch = mode->vtotal - mode->vsync_end;
int vsync_len = mode->vsync_end - mode->vsync_start;
int hfront_porch = mode->hsync_start - mode->hdisplay;
int hback_porch = mode->htotal - mode->hsync_end;
int hsync_len = mode->hsync_end - mode->hsync_start;
if (hsync_len > 0x40 || hsync_len < 1)
return MODE_HSYNC;
if (vsync_len > 0x40 || vsync_len < 1)
return MODE_VSYNC;
if (hfront_porch > 0x200 || hfront_porch < 1 ||
hback_porch > 0x200 || hback_porch < 1 ||
mode->hdisplay < 1)
return MODE_H_ILLEGAL;
if (vfront_porch > 0x40 || vfront_porch < 1 ||
vback_porch > 0x40 || vback_porch < 0 ||
mode->vdisplay < 1)
return MODE_V_ILLEGAL;
return MODE_OK;
}
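A worked pass through the checks above for a typical 800x480 LCD panel timing; the mode numbers are illustrative, not taken from the patch:

#include <stdio.h>

int main(void)
{
	int hdisplay = 800, hsync_start = 840, hsync_end = 888, htotal = 928;
	int vdisplay = 480, vsync_start = 493, vsync_end = 496, vtotal = 525;

	int hfront_porch = hsync_start - hdisplay;	/* 40, limit 0x200 */
	int hback_porch  = htotal - hsync_end;		/* 40, limit 0x200 */
	int hsync_len    = hsync_end - hsync_start;	/* 48, limit 0x40  */
	int vfront_porch = vsync_start - vdisplay;	/* 13, limit 0x40  */
	int vback_porch  = vtotal - vsync_end;		/* 29, limit 0x40  */
	int vsync_len    = vsync_end - vsync_start;	/*  3, limit 0x40  */

	/* every value is inside the hardware window -> MODE_OK */
	printf("h: fp=%d bp=%d sync=%d  v: fp=%d bp=%d sync=%d\n",
	       hfront_porch, hback_porch, hsync_len,
	       vfront_porch, vback_porch, vsync_len);
	return 0;
}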
static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
{
struct drm_device *dev = data;
struct atmel_hlcdc_dc *dc = dev->dev_private;
unsigned long status;
unsigned int imr, isr;
int i;
regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_IMR, &imr);
regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
status = imr & isr;
if (!status)
return IRQ_NONE;
if (status & ATMEL_HLCDC_SOF)
atmel_hlcdc_crtc_irq(dc->crtc);
for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
struct atmel_hlcdc_layer *layer = dc->layers[i];
if (!(ATMEL_HLCDC_LAYER_STATUS(i) & status) || !layer)
continue;
atmel_hlcdc_layer_irq(layer);
}
return IRQ_HANDLED;
}
static struct drm_framebuffer *atmel_hlcdc_fb_create(struct drm_device *dev,
struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
{
return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
if (dc->fbdev) {
drm_fbdev_cma_hotplug_event(dc->fbdev);
} else {
dc->fbdev = drm_fbdev_cma_init(dev, 24,
dev->mode_config.num_crtc,
dev->mode_config.num_connector);
if (IS_ERR(dc->fbdev))
dc->fbdev = NULL;
}
}
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = atmel_hlcdc_fb_create,
.output_poll_changed = atmel_hlcdc_fb_output_poll_changed,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_planes *planes;
int ret;
int i;
drm_mode_config_init(dev);
ret = atmel_hlcdc_create_outputs(dev);
if (ret) {
dev_err(dev->dev, "failed to create panel: %d\n", ret);
return ret;
}
planes = atmel_hlcdc_create_planes(dev);
if (IS_ERR(planes)) {
dev_err(dev->dev, "failed to create planes\n");
return PTR_ERR(planes);
}
dc->planes = planes;
dc->layers[planes->primary->layer.desc->id] =
&planes->primary->layer;
if (planes->cursor)
dc->layers[planes->cursor->layer.desc->id] =
&planes->cursor->layer;
for (i = 0; i < planes->noverlays; i++)
dc->layers[planes->overlays[i]->layer.desc->id] =
&planes->overlays[i]->layer;
ret = atmel_hlcdc_crtc_create(dev);
if (ret) {
dev_err(dev->dev, "failed to create crtc\n");
return ret;
}
dev->mode_config.min_width = dc->desc->min_width;
dev->mode_config.min_height = dc->desc->min_height;
dev->mode_config.max_width = dc->desc->max_width;
dev->mode_config.max_height = dc->desc->max_height;
dev->mode_config.funcs = &mode_config_funcs;
return 0;
}
static int atmel_hlcdc_dc_load(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
const struct of_device_id *match;
struct atmel_hlcdc_dc *dc;
int ret;
match = of_match_node(atmel_hlcdc_of_match, dev->dev->parent->of_node);
if (!match) {
dev_err(&pdev->dev, "invalid compatible string\n");
return -ENODEV;
}
if (!match->data) {
dev_err(&pdev->dev, "invalid hlcdc description\n");
return -EINVAL;
}
dc = devm_kzalloc(dev->dev, sizeof(*dc), GFP_KERNEL);
if (!dc)
return -ENOMEM;
dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
if (!dc->wq)
return -ENOMEM;
dc->desc = match->data;
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
goto err_destroy_wq;
}
pm_runtime_enable(dev->dev);
pm_runtime_put_sync(dev->dev);
ret = atmel_hlcdc_dc_modeset_init(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize mode setting\n");
goto err_periph_clk_disable;
}
ret = drm_vblank_init(dev, 1);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
goto err_periph_clk_disable;
}
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, dc->hlcdc->irq);
pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
goto err_periph_clk_disable;
}
platform_set_drvdata(pdev, dev);
drm_kms_helper_poll_init(dev);
/* force connectors detection */
drm_helper_hpd_irq_event(dev);
return 0;
err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
err_destroy_wq:
destroy_workqueue(dc->wq);
return ret;
}
static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
if (dc->fbdev)
drm_fbdev_cma_fini(dc->fbdev);
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
pm_runtime_get_sync(dev->dev);
drm_irq_uninstall(dev);
pm_runtime_put_sync(dev->dev);
dev->dev_private = NULL;
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
destroy_workqueue(dc->wq);
}
static int atmel_hlcdc_dc_connector_plug_all(struct drm_device *dev)
{
struct drm_connector *connector, *failed;
int ret;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
ret = drm_connector_register(connector);
if (ret) {
failed = connector;
goto err;
}
}
mutex_unlock(&dev->mode_config.mutex);
return 0;
err:
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (failed == connector)
break;
drm_connector_unregister(connector);
}
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
drm_connector_unplug_all(dev);
mutex_unlock(&dev->mode_config.mutex);
}
static void atmel_hlcdc_dc_preclose(struct drm_device *dev,
struct drm_file *file)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
atmel_hlcdc_crtc_cancel_page_flip(crtc, file);
}
static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
drm_fbdev_cma_restore_mode(dc->fbdev);
}
static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
unsigned int cfg = 0;
int i;
/* Enable interrupts on activated layers */
for (i = 0; i < ATMEL_HLCDC_MAX_LAYERS; i++) {
if (dc->layers[i])
cfg |= ATMEL_HLCDC_LAYER_STATUS(i);
}
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, cfg);
return 0;
}
static void atmel_hlcdc_dc_irq_uninstall(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
unsigned int isr;
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, 0xffffffff);
regmap_read(dc->hlcdc->regmap, ATMEL_HLCDC_ISR, &isr);
}
static int atmel_hlcdc_dc_enable_vblank(struct drm_device *dev, int crtc)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
/* Enable SOF (Start Of Frame) interrupt for vblank counting */
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IER, ATMEL_HLCDC_SOF);
return 0;
}
static void atmel_hlcdc_dc_disable_vblank(struct drm_device *dev, int crtc)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
regmap_write(dc->hlcdc->regmap, ATMEL_HLCDC_IDR, ATMEL_HLCDC_SOF);
}
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.poll = drm_poll,
.read = drm_read,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
static struct drm_driver atmel_hlcdc_dc_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
.preclose = atmel_hlcdc_dc_preclose,
.lastclose = atmel_hlcdc_dc_lastclose,
.irq_handler = atmel_hlcdc_dc_irq_handler,
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
.irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = atmel_hlcdc_dc_enable_vblank,
.disable_vblank = atmel_hlcdc_dc_disable_vblank,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
.date = "20141504",
.major = 1,
.minor = 0,
};
static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
{
struct drm_device *ddev;
int ret;
ddev = drm_dev_alloc(&atmel_hlcdc_dc_driver, &pdev->dev);
if (!ddev)
return -ENOMEM;
ret = drm_dev_set_unique(ddev, dev_name(ddev->dev));
if (ret)
goto err_unref;
ret = atmel_hlcdc_dc_load(ddev);
if (ret)
goto err_unref;
ret = drm_dev_register(ddev, 0);
if (ret)
goto err_unload;
ret = atmel_hlcdc_dc_connector_plug_all(ddev);
if (ret)
goto err_unregister;
return 0;
err_unregister:
drm_dev_unregister(ddev);
err_unload:
atmel_hlcdc_dc_unload(ddev);
err_unref:
drm_dev_unref(ddev);
return ret;
}
static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
{
struct drm_device *ddev = platform_get_drvdata(pdev);
atmel_hlcdc_dc_connector_unplug_all(ddev);
drm_dev_unregister(ddev);
atmel_hlcdc_dc_unload(ddev);
drm_dev_unref(ddev);
return 0;
}
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
{ },
};
static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.probe = atmel_hlcdc_dc_drm_probe,
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
module_platform_driver(atmel_hlcdc_dc_platform_driver);
MODULE_AUTHOR("Jean-Jacques Hiblot <jjhiblot@traphandler.com>");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_DESCRIPTION("Atmel HLCDC Display Controller DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:atmel-hlcdc-dc");


@@ -0,0 +1,213 @@
/*
* Copyright (C) 2014 Traphandler
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef DRM_ATMEL_HLCDC_H
#define DRM_ATMEL_HLCDC_H
#include <linux/clk.h>
#include <linux/irqdomain.h>
#include <linux/pwm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_panel.h>
#include <drm/drmP.h>
#include "atmel_hlcdc_layer.h"
#define ATMEL_HLCDC_MAX_LAYERS 5
/**
* Atmel HLCDC Display Controller description structure.
*
* This structure describes the HLCDC IP capabilities and depends on the
* HLCDC IP version (or Atmel SoC family).
*
* @min_width: minimum width supported by the Display Controller
* @min_height: minimum height supported by the Display Controller
* @max_width: maximum width supported by the Display Controller
* @max_height: maximum height supported by the Display Controller
* @layers: a layer description table describing available layers
* @nlayers: layer description table size
*/
struct atmel_hlcdc_dc_desc {
int min_width;
int min_height;
int max_width;
int max_height;
const struct atmel_hlcdc_layer_desc *layers;
int nlayers;
};
/**
* Atmel HLCDC Plane properties.
*
* This structure stores plane property definitions.
*
* @alpha: alpha blending (or transparency) property
* @rotation: rotation property
*/
struct atmel_hlcdc_plane_properties {
struct drm_property *alpha;
struct drm_property *rotation;
};
/**
* Atmel HLCDC Plane.
*
* @base: base DRM plane structure
* @layer: HLCDC layer structure
* @properties: pointer to the property definitions structure
* @rotation: current rotation status
*/
struct atmel_hlcdc_plane {
struct drm_plane base;
struct atmel_hlcdc_layer layer;
struct atmel_hlcdc_plane_properties *properties;
unsigned int rotation;
};
static inline struct atmel_hlcdc_plane *
drm_plane_to_atmel_hlcdc_plane(struct drm_plane *p)
{
return container_of(p, struct atmel_hlcdc_plane, base);
}
static inline struct atmel_hlcdc_plane *
atmel_hlcdc_layer_to_plane(struct atmel_hlcdc_layer *l)
{
return container_of(l, struct atmel_hlcdc_plane, layer);
}
/**
* Atmel HLCDC Plane update request structure.
*
* @crtc_x: x position of the plane relative to the CRTC
* @crtc_y: y position of the plane relative to the CRTC
* @crtc_w: visible width of the plane
* @crtc_h: visible height of the plane
* @src_x: x buffer position
* @src_y: y buffer position
* @src_w: buffer width
* @src_h: buffer height
* @fb: framebuffer object
* @bpp: bytes per pixel deduced from pixel_format
* @offsets: offsets to apply to the GEM buffers
* @xstride: value to add to the pixel pointer between each line
* @pstride: value to add to the pixel pointer between each pixel
* @nplanes: number of planes (deduced from pixel_format)
*/
struct atmel_hlcdc_plane_update_req {
int crtc_x;
int crtc_y;
unsigned int crtc_w;
unsigned int crtc_h;
uint32_t src_x;
uint32_t src_y;
uint32_t src_w;
uint32_t src_h;
struct drm_framebuffer *fb;
/* These fields are private and should not be touched */
int bpp[ATMEL_HLCDC_MAX_PLANES];
unsigned int offsets[ATMEL_HLCDC_MAX_PLANES];
int xstride[ATMEL_HLCDC_MAX_PLANES];
int pstride[ATMEL_HLCDC_MAX_PLANES];
int nplanes;
};
/**
* Atmel HLCDC Planes.
*
* This structure stores the instantiated HLCDC Planes and can be accessed by
* the HLCDC Display Controller or the HLCDC CRTC.
*
* @primary: primary plane
* @cursor: hardware cursor plane
* @overlays: overlay plane table
* @noverlays: number of overlay planes
*/
struct atmel_hlcdc_planes {
struct atmel_hlcdc_plane *primary;
struct atmel_hlcdc_plane *cursor;
struct atmel_hlcdc_plane **overlays;
int noverlays;
};
/**
* Atmel HLCDC Display Controller.
*
* @desc: HLCDC Display Controller description
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @fbdev: framebuffer device attached to the Display Controller
* @crtc: CRTC provided by the display controller
* @planes: instantiated planes
* @layers: active HLCDC layers
* @wq: display controller workqueue
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
struct atmel_hlcdc *hlcdc;
struct drm_fbdev_cma *fbdev;
struct drm_crtc *crtc;
struct atmel_hlcdc_planes *planes;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct workqueue_struct *wq;
};
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats;
int atmel_hlcdc_dc_mode_valid(struct atmel_hlcdc_dc *dc,
struct drm_display_mode *mode);
struct atmel_hlcdc_planes *
atmel_hlcdc_create_planes(struct drm_device *dev);
int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
struct atmel_hlcdc_plane_update_req *req,
const struct drm_display_mode *mode);
int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
struct atmel_hlcdc_plane_update_req *req);
int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w,
unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_display_mode *mode);
void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
struct drm_file *file);
int atmel_hlcdc_crtc_create(struct drm_device *dev);
int atmel_hlcdc_create_outputs(struct drm_device *dev);
#endif /* DRM_ATMEL_HLCDC_H */


@@ -0,0 +1,667 @@
/*
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include "atmel_hlcdc_dc.h"
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
struct atmel_hlcdc_layer_fb_flip *flip = val;
if (flip->fb)
drm_framebuffer_unreference(flip->fb);
kfree(flip);
}
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
if (flip->fb)
drm_framebuffer_unreference(flip->fb);
kfree(flip->task);
kfree(flip);
}
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
struct atmel_hlcdc_layer_fb_flip *flip)
{
int i;
if (!flip)
return;
for (i = 0; i < layer->max_planes; i++) {
if (!flip->dscrs[i])
break;
flip->dscrs[i]->status = 0;
flip->dscrs[i] = NULL;
}
drm_flip_work_queue_task(&layer->gc, flip->task);
drm_flip_work_commit(&layer->gc, layer->wq);
}
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
int id)
{
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct atmel_hlcdc_layer_update_slot *slot;
if (id < 0 || id > 1)
return;
slot = &upd->slots[id];
bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
memset(slot->configs, 0,
sizeof(*slot->configs) * layer->desc->nconfigs);
if (slot->fb_flip) {
atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
slot->fb_flip = NULL;
}
}
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
const struct atmel_hlcdc_layer_desc *desc = layer->desc;
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct regmap *regmap = layer->hlcdc->regmap;
struct atmel_hlcdc_layer_update_slot *slot;
struct atmel_hlcdc_layer_fb_flip *fb_flip;
struct atmel_hlcdc_dma_channel_dscr *dscr;
unsigned int cfg;
u32 action = 0;
int i = 0;
if (upd->pending < 0 || upd->pending > 1)
return;
slot = &upd->slots[upd->pending];
for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_CFG(layer, cfg),
slot->configs[cfg]);
action |= ATMEL_HLCDC_LAYER_UPDATE;
}
fb_flip = slot->fb_flip;
if (!fb_flip->fb)
goto apply;
if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
for (i = 0; i < fb_flip->ngems; i++) {
dscr = fb_flip->dscrs[i];
dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
ATMEL_HLCDC_LAYER_DMA_IRQ |
ATMEL_HLCDC_LAYER_ADD_IRQ |
ATMEL_HLCDC_LAYER_DONE_IRQ;
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
dscr->addr);
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
dscr->ctrl);
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
dscr->next);
}
action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
dma->status = ATMEL_HLCDC_LAYER_ENABLED;
} else {
for (i = 0; i < fb_flip->ngems; i++) {
dscr = fb_flip->dscrs[i];
dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
ATMEL_HLCDC_LAYER_DMA_IRQ |
ATMEL_HLCDC_LAYER_DSCR_IRQ |
ATMEL_HLCDC_LAYER_DONE_IRQ;
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
dscr->next);
}
action |= ATMEL_HLCDC_LAYER_A2Q;
}
/* Release unneeded descriptors */
for (i = fb_flip->ngems; i < layer->max_planes; i++) {
fb_flip->dscrs[i]->status = 0;
fb_flip->dscrs[i] = NULL;
}
dma->queue = fb_flip;
slot->fb_flip = NULL;
apply:
if (action)
regmap_write(regmap,
desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
action);
atmel_hlcdc_layer_update_reset(layer, upd->pending);
upd->pending = -1;
}
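/*
 * Layer interrupt handler. The per-plane interrupt bits are packed in
 * 8-bit groups (see the "<< (8 * i)" shifts in atmel_hlcdc_layer_init()),
 * hence the "status >> (8 * i)" extraction below: each memory plane's
 * LOADED/DONE/OVERRUN state is aggregated into the flip status before
 * deciding which fb_flip objects can be released.
 */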
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
const struct atmel_hlcdc_layer_desc *desc = layer->desc;
struct regmap *regmap = layer->hlcdc->regmap;
struct atmel_hlcdc_layer_fb_flip *flip;
unsigned long flags;
unsigned int isr, imr;
unsigned int status;
unsigned int plane_status;
u32 flip_status;
int i;
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
status = imr & isr;
if (!status)
return;
spin_lock_irqsave(&layer->lock, flags);
flip = dma->queue ? dma->queue : dma->cur;
if (!flip) {
spin_unlock_irqrestore(&layer->lock, flags);
return;
}
/*
* Set LOADED and DONE flags: they'll be cleared if at least one
* memory plane is not LOADED or DONE.
*/
flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
for (i = 0; i < flip->ngems; i++) {
plane_status = (status >> (8 * i));
if (plane_status &
(ATMEL_HLCDC_LAYER_ADD_IRQ |
ATMEL_HLCDC_LAYER_DSCR_IRQ) &
~flip->dscrs[i]->ctrl) {
flip->dscrs[i]->status |=
ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
flip->dscrs[i]->ctrl |=
ATMEL_HLCDC_LAYER_ADD_IRQ |
ATMEL_HLCDC_LAYER_DSCR_IRQ;
}
if (plane_status &
ATMEL_HLCDC_LAYER_DONE_IRQ &
~flip->dscrs[i]->ctrl) {
flip->dscrs[i]->status |=
ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
flip->dscrs[i]->ctrl |=
ATMEL_HLCDC_LAYER_DONE_IRQ;
}
if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
flip->dscrs[i]->status |=
ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
/*
* Clear LOADED and DONE flags if the memory plane is either
* not LOADED or not DONE.
*/
if (!(flip->dscrs[i]->status &
ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
if (!(flip->dscrs[i]->status &
ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
/*
* An overrun on one memory plane impacts the whole framebuffer
* transfer, hence we set the OVERRUN flag as soon as there's
* one memory plane reporting such an overrun.
*/
flip_status |= flip->dscrs[i]->status &
ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
}
/* Get changed bits */
flip_status ^= flip->status;
flip->status |= flip_status;
if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
dma->cur = dma->queue;
dma->queue = NULL;
}
if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
dma->cur = NULL;
}
if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
regmap_write(regmap,
desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
ATMEL_HLCDC_LAYER_RST);
if (dma->queue)
atmel_hlcdc_layer_fb_flip_release_queue(layer,
dma->queue);
if (dma->cur)
atmel_hlcdc_layer_fb_flip_release_queue(layer,
dma->cur);
dma->cur = NULL;
dma->queue = NULL;
}
if (!dma->queue) {
atmel_hlcdc_layer_update_apply(layer);
if (!dma->cur)
dma->status = ATMEL_HLCDC_LAYER_DISABLED;
}
spin_unlock_irqrestore(&layer->lock, flags);
}
int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct regmap *regmap = layer->hlcdc->regmap;
const struct atmel_hlcdc_layer_desc *desc = layer->desc;
unsigned long flags;
unsigned int isr;
spin_lock_irqsave(&layer->lock, flags);
/* Disable the layer */
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
ATMEL_HLCDC_LAYER_RST);
/* Clear all pending interrupts */
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
/* Discard current and queued framebuffer transfers. */
if (dma->cur) {
atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
dma->cur = NULL;
}
if (dma->queue) {
atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
dma->queue = NULL;
}
/*
* Then discard the pending update request (if any) to prevent the
* DMA irq handler from restarting the DMA channel after it has
* been disabled.
*/
if (upd->pending >= 0) {
atmel_hlcdc_layer_update_reset(layer, upd->pending);
upd->pending = -1;
}
dma->status = ATMEL_HLCDC_LAYER_DISABLED;
spin_unlock_irqrestore(&layer->lock, flags);
return 0;
}
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct regmap *regmap = layer->hlcdc->regmap;
struct atmel_hlcdc_layer_fb_flip *fb_flip;
struct atmel_hlcdc_layer_update_slot *slot;
unsigned long flags;
int i, j = 0;
fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
if (!fb_flip)
return -ENOMEM;
fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
if (!fb_flip->task) {
kfree(fb_flip);
return -ENOMEM;
}
spin_lock_irqsave(&layer->lock, flags);
upd->next = upd->pending ? 0 : 1;
slot = &upd->slots[upd->next];
for (i = 0; i < layer->max_planes * 4; i++) {
if (!dma->dscrs[i].status) {
fb_flip->dscrs[j++] = &dma->dscrs[i];
dma->dscrs[i].status =
ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
if (j == layer->max_planes)
break;
}
}
if (j < layer->max_planes) {
for (i = 0; i < j; i++)
fb_flip->dscrs[i]->status = 0;
spin_unlock_irqrestore(&layer->lock, flags);
atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
return -EBUSY;
}
slot->fb_flip = fb_flip;
if (upd->pending >= 0) {
memcpy(slot->configs,
upd->slots[upd->pending].configs,
layer->desc->nconfigs * sizeof(u32));
memcpy(slot->updated_configs,
upd->slots[upd->pending].updated_configs,
DIV_ROUND_UP(layer->desc->nconfigs,
BITS_PER_BYTE * sizeof(unsigned long)) *
sizeof(unsigned long));
slot->fb_flip->fb = upd->slots[upd->pending].fb_flip->fb;
if (slot->fb_flip->fb) {
slot->fb_flip->ngems =
upd->slots[upd->pending].fb_flip->ngems;
drm_framebuffer_reference(slot->fb_flip->fb);
}
} else {
regmap_bulk_read(regmap,
layer->desc->regs_offset +
ATMEL_HLCDC_LAYER_CFG(layer, 0),
upd->slots[upd->next].configs,
layer->desc->nconfigs);
}
spin_unlock_irqrestore(&layer->lock, flags);
return 0;
}
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_update *upd = &layer->update;
atmel_hlcdc_layer_update_reset(layer, upd->next);
upd->next = -1;
}
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
struct drm_framebuffer *fb,
unsigned int *offsets)
{
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct atmel_hlcdc_layer_fb_flip *fb_flip;
struct atmel_hlcdc_layer_update_slot *slot;
struct atmel_hlcdc_dma_channel_dscr *dscr;
struct drm_framebuffer *old_fb;
int nplanes = 0;
int i;
if (upd->next < 0 || upd->next > 1)
return;
if (fb)
nplanes = drm_format_num_planes(fb->pixel_format);
if (nplanes > layer->max_planes)
return;
slot = &upd->slots[upd->next];
fb_flip = slot->fb_flip;
old_fb = slot->fb_flip->fb;
for (i = 0; i < nplanes; i++) {
struct drm_gem_cma_object *gem;
dscr = slot->fb_flip->dscrs[i];
gem = drm_fb_cma_get_gem_obj(fb, i);
dscr->addr = gem->paddr + offsets[i];
}
fb_flip->ngems = nplanes;
fb_flip->fb = fb;
if (fb)
drm_framebuffer_reference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
}
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
u32 mask, u32 val)
{
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct atmel_hlcdc_layer_update_slot *slot;
if (upd->next < 0 || upd->next > 1)
return;
if (cfg >= layer->desc->nconfigs)
return;
slot = &upd->slots[upd->next];
slot->configs[cfg] &= ~mask;
slot->configs[cfg] |= (val & mask);
set_bit(cfg, slot->updated_configs);
}
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
struct atmel_hlcdc_layer_update *upd = &layer->update;
struct atmel_hlcdc_layer_update_slot *slot;
unsigned long flags;
if (upd->next < 0 || upd->next > 1)
return;
slot = &upd->slots[upd->next];
spin_lock_irqsave(&layer->lock, flags);
/*
* Release the pending update request and replace it with the new one.
*/
if (upd->pending >= 0)
atmel_hlcdc_layer_update_reset(layer, upd->pending);
upd->pending = upd->next;
upd->next = -1;
if (!dma->queue)
atmel_hlcdc_layer_update_apply(layer);
spin_unlock_irqrestore(&layer->lock, flags);
}
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
dma_addr_t dma_addr;
int i;
dma->dscrs = dma_alloc_coherent(dev->dev,
layer->max_planes * 4 *
sizeof(*dma->dscrs),
&dma_addr, GFP_KERNEL);
if (!dma->dscrs)
return -ENOMEM;
for (i = 0; i < layer->max_planes * 4; i++) {
struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];
dscr->next = dma_addr + (i * sizeof(*dscr));
}
return 0;
}
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
struct atmel_hlcdc_layer *layer)
{
struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
int i;
for (i = 0; i < layer->max_planes * 4; i++) {
struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];
dscr->status = 0;
}
dma_free_coherent(dev->dev, layer->max_planes * 4 *
sizeof(*dma->dscrs), dma->dscrs,
dma->dscrs[0].next);
}
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
struct atmel_hlcdc_layer *layer,
const struct atmel_hlcdc_layer_desc *desc)
{
struct atmel_hlcdc_layer_update *upd = &layer->update;
int updated_size;
void *buffer;
int i;
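/*
 * Both update slots share a single allocation: for each slot we carve
 * out a bitmap of updated configs (updated_size unsigned longs)
 * followed by the config values themselves (nconfigs u32 words).
 */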
updated_size = DIV_ROUND_UP(desc->nconfigs,
BITS_PER_BYTE *
sizeof(unsigned long));
buffer = devm_kzalloc(dev->dev,
((desc->nconfigs * sizeof(u32)) +
(updated_size * sizeof(unsigned long))) * 2,
GFP_KERNEL);
if (!buffer)
return -ENOMEM;
for (i = 0; i < 2; i++) {
upd->slots[i].updated_configs = buffer;
buffer += updated_size * sizeof(unsigned long);
upd->slots[i].configs = buffer;
buffer += desc->nconfigs * sizeof(u32);
}
upd->pending = -1;
upd->next = -1;
return 0;
}
int atmel_hlcdc_layer_init(struct drm_device *dev,
struct atmel_hlcdc_layer *layer,
const struct atmel_hlcdc_layer_desc *desc)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct regmap *regmap = dc->hlcdc->regmap;
unsigned int tmp;
int ret;
int i;
layer->hlcdc = dc->hlcdc;
layer->wq = dc->wq;
layer->desc = desc;
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
ATMEL_HLCDC_LAYER_RST);
for (i = 0; i < desc->formats->nformats; i++) {
int nplanes = drm_format_num_planes(desc->formats->formats[i]);
if (nplanes > layer->max_planes)
layer->max_planes = nplanes;
}
spin_lock_init(&layer->lock);
drm_flip_work_init(&layer->gc, desc->name,
atmel_hlcdc_layer_fb_flip_release);
ret = atmel_hlcdc_layer_dma_init(dev, layer);
if (ret)
return ret;
ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
if (ret)
return ret;
/* Flush Status Register */
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
0xffffffff);
regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
&tmp);
tmp = 0;
for (i = 0; i < layer->max_planes; i++)
tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
ATMEL_HLCDC_LAYER_DSCR_IRQ |
ATMEL_HLCDC_LAYER_ADD_IRQ |
ATMEL_HLCDC_LAYER_DONE_IRQ |
ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);
return 0;
}
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
struct atmel_hlcdc_layer *layer)
{
const struct atmel_hlcdc_layer_desc *desc = layer->desc;
struct regmap *regmap = layer->hlcdc->regmap;
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
0xffffffff);
regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
ATMEL_HLCDC_LAYER_RST);
atmel_hlcdc_layer_dma_cleanup(dev, layer);
drm_flip_work_cleanup(&layer->gc);
}


@ -0,0 +1,398 @@
/*
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef DRM_ATMEL_HLCDC_LAYER_H
#define DRM_ATMEL_HLCDC_LAYER_H
#include <linux/mfd/atmel-hlcdc.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drmP.h>
#define ATMEL_HLCDC_LAYER_CHER 0x0
#define ATMEL_HLCDC_LAYER_CHDR 0x4
#define ATMEL_HLCDC_LAYER_CHSR 0x8
#define ATMEL_HLCDC_LAYER_DMA_CHAN BIT(0)
#define ATMEL_HLCDC_LAYER_UPDATE BIT(1)
#define ATMEL_HLCDC_LAYER_A2Q BIT(2)
#define ATMEL_HLCDC_LAYER_RST BIT(8)
#define ATMEL_HLCDC_LAYER_IER 0xc
#define ATMEL_HLCDC_LAYER_IDR 0x10
#define ATMEL_HLCDC_LAYER_IMR 0x14
#define ATMEL_HLCDC_LAYER_ISR 0x18
#define ATMEL_HLCDC_LAYER_DFETCH BIT(0)
#define ATMEL_HLCDC_LAYER_LFETCH BIT(1)
#define ATMEL_HLCDC_LAYER_DMA_IRQ BIT(2)
#define ATMEL_HLCDC_LAYER_DSCR_IRQ BIT(3)
#define ATMEL_HLCDC_LAYER_ADD_IRQ BIT(4)
#define ATMEL_HLCDC_LAYER_DONE_IRQ BIT(5)
#define ATMEL_HLCDC_LAYER_OVR_IRQ BIT(6)
#define ATMEL_HLCDC_LAYER_PLANE_HEAD(n) (((n) * 0x10) + 0x1c)
#define ATMEL_HLCDC_LAYER_PLANE_ADDR(n) (((n) * 0x10) + 0x20)
#define ATMEL_HLCDC_LAYER_PLANE_CTRL(n) (((n) * 0x10) + 0x24)
#define ATMEL_HLCDC_LAYER_PLANE_NEXT(n) (((n) * 0x10) + 0x28)
#define ATMEL_HLCDC_LAYER_CFG(p, c) (((c) * 4) + ((p)->max_planes * 0x10) + 0x1c)
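/*
 * Worked example of the layout implied by the macros above, for a layer
 * with max_planes == 1: plane 0 DMA registers sit at 0x1c (HEAD), 0x20
 * (ADDR), 0x24 (CTRL) and 0x28 (NEXT), and the config registers start
 * right after them, i.e. ATMEL_HLCDC_LAYER_CFG(p, c) == 0x2c + (c * 4).
 */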
#define ATMEL_HLCDC_LAYER_DMA_CFG_ID 0
#define ATMEL_HLCDC_LAYER_DMA_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, ATMEL_HLCDC_LAYER_DMA_CFG_ID)
#define ATMEL_HLCDC_LAYER_DMA_SIF BIT(0)
#define ATMEL_HLCDC_LAYER_DMA_BLEN_MASK GENMASK(5, 4)
#define ATMEL_HLCDC_LAYER_DMA_BLEN_SINGLE (0 << 4)
#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR4 (1 << 4)
#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR8 (2 << 4)
#define ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16 (3 << 4)
#define ATMEL_HLCDC_LAYER_DMA_DLBO BIT(8)
#define ATMEL_HLCDC_LAYER_DMA_ROTDIS BIT(12)
#define ATMEL_HLCDC_LAYER_DMA_LOCKDIS BIT(13)
#define ATMEL_HLCDC_LAYER_FORMAT_CFG_ID 1
#define ATMEL_HLCDC_LAYER_FORMAT_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, ATMEL_HLCDC_LAYER_FORMAT_CFG_ID)
#define ATMEL_HLCDC_LAYER_RGB (0 << 0)
#define ATMEL_HLCDC_LAYER_CLUT (1 << 0)
#define ATMEL_HLCDC_LAYER_YUV (2 << 0)
#define ATMEL_HLCDC_RGB_MODE(m) (((m) & 0xf) << 4)
#define ATMEL_HLCDC_CLUT_MODE(m) (((m) & 0x3) << 8)
#define ATMEL_HLCDC_YUV_MODE(m) (((m) & 0xf) << 12)
#define ATMEL_HLCDC_YUV422ROT BIT(16)
#define ATMEL_HLCDC_YUV422SWP BIT(17)
#define ATMEL_HLCDC_DSCALEOPT BIT(20)
#define ATMEL_HLCDC_XRGB4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(0))
#define ATMEL_HLCDC_ARGB4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(1))
#define ATMEL_HLCDC_RGBA4444_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(2))
#define ATMEL_HLCDC_RGB565_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(3))
#define ATMEL_HLCDC_ARGB1555_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(4))
#define ATMEL_HLCDC_XRGB8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(9))
#define ATMEL_HLCDC_RGB888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(10))
#define ATMEL_HLCDC_ARGB8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(12))
#define ATMEL_HLCDC_RGBA8888_MODE (ATMEL_HLCDC_LAYER_RGB | ATMEL_HLCDC_RGB_MODE(13))
#define ATMEL_HLCDC_AYUV_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(0))
#define ATMEL_HLCDC_YUYV_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(1))
#define ATMEL_HLCDC_UYVY_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(2))
#define ATMEL_HLCDC_YVYU_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(3))
#define ATMEL_HLCDC_VYUY_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(4))
#define ATMEL_HLCDC_NV61_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(5))
#define ATMEL_HLCDC_YUV422_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(6))
#define ATMEL_HLCDC_NV21_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(7))
#define ATMEL_HLCDC_YUV420_MODE (ATMEL_HLCDC_LAYER_YUV | ATMEL_HLCDC_YUV_MODE(8))
#define ATMEL_HLCDC_LAYER_POS_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.pos)
#define ATMEL_HLCDC_LAYER_SIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.size)
#define ATMEL_HLCDC_LAYER_MEMSIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.memsize)
#define ATMEL_HLCDC_LAYER_XSTRIDE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.xstride)
#define ATMEL_HLCDC_LAYER_PSTRIDE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.pstride)
#define ATMEL_HLCDC_LAYER_DFLTCOLOR_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.default_color)
#define ATMEL_HLCDC_LAYER_CRKEY_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.chroma_key)
#define ATMEL_HLCDC_LAYER_CRKEY_MASK_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.chroma_key_mask)
#define ATMEL_HLCDC_LAYER_GENERAL_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.general_config)
#define ATMEL_HLCDC_LAYER_CRKEY BIT(0)
#define ATMEL_HLCDC_LAYER_INV BIT(1)
#define ATMEL_HLCDC_LAYER_ITER2BL BIT(2)
#define ATMEL_HLCDC_LAYER_ITER BIT(3)
#define ATMEL_HLCDC_LAYER_REVALPHA BIT(4)
#define ATMEL_HLCDC_LAYER_GAEN BIT(5)
#define ATMEL_HLCDC_LAYER_LAEN BIT(6)
#define ATMEL_HLCDC_LAYER_OVR BIT(7)
#define ATMEL_HLCDC_LAYER_DMA BIT(8)
#define ATMEL_HLCDC_LAYER_REP BIT(9)
#define ATMEL_HLCDC_LAYER_DSTKEY BIT(10)
#define ATMEL_HLCDC_LAYER_DISCEN BIT(11)
#define ATMEL_HLCDC_LAYER_GA_SHIFT 16
#define ATMEL_HLCDC_LAYER_GA_MASK GENMASK(23, ATMEL_HLCDC_LAYER_GA_SHIFT)
#define ATMEL_HLCDC_LAYER_CSC_CFG(p, o) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.csc + o)
#define ATMEL_HLCDC_LAYER_DISC_POS_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.disc_pos)
#define ATMEL_HLCDC_LAYER_DISC_SIZE_CFG(p) ATMEL_HLCDC_LAYER_CFG(p, (p)->desc->layout.disc_size)
#define ATMEL_HLCDC_MAX_PLANES 3
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED BIT(0)
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED BIT(1)
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE BIT(2)
#define ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN BIT(3)
/**
* Atmel HLCDC Layer registers layout structure
*
* Each HLCDC layer has its own register organization and a given register
* can be placed differently on two different layers depending on the
* layer's capabilities.
* This structure stores the register layout of a given layer and is used
* by the HLCDC layer code to choose the appropriate register to write to
* or to read from.
*
* For all fields, a value of zero means "unsupported".
*
* See Atmel's datasheet for a detailed description of these registers.
*
* @xstride: xstride registers
* @pstride: pstride registers
* @pos: position register
* @size: displayed size register
* @memsize: memory size register
* @default_color: default color register
* @chroma_key: chroma key register
* @chroma_key_mask: chroma key mask register
* @general_config: general layer config register
* @disc_pos: discard area position register
* @disc_size: discard area size register
* @csc: color space conversion register
*/
struct atmel_hlcdc_layer_cfg_layout {
int xstride[ATMEL_HLCDC_MAX_PLANES];
int pstride[ATMEL_HLCDC_MAX_PLANES];
int pos;
int size;
int memsize;
int default_color;
int chroma_key;
int chroma_key_mask;
int general_config;
int disc_pos;
int disc_size;
int csc;
};
/**
* Atmel HLCDC framebuffer flip structure
*
* This structure is allocated when someone asks for a layer update (most
* likely a DRM plane update on a primary, overlay or cursor plane) and
* released when the layer no longer needs to reference the framebuffer
* object (i.e. the layer was disabled or updated).
*
* @dscrs: DMA descriptors
* @task: flip work task used to free this structure (and release the fb)
* asynchronously
* @fb: the referenced framebuffer object
* @ngems: number of GEM objects referenced by the fb element
* @status: fb flip operation status
*/
struct atmel_hlcdc_layer_fb_flip {
struct atmel_hlcdc_dma_channel_dscr *dscrs[ATMEL_HLCDC_MAX_PLANES];
struct drm_flip_task *task;
struct drm_framebuffer *fb;
int ngems;
u32 status;
};
/**
* Atmel HLCDC DMA descriptor structure
*
* This structure is used by the HLCDC DMA engine to schedule a DMA transfer.
*
* The structure fields must remain in this specific order, because they're
* used by the HLCDC DMA engine, which expects them in this order.
* HLCDC DMA descriptors must be aligned on 64 bits.
*
* @addr: buffer DMA address
* @ctrl: DMA transfer options
* @next: next DMA descriptor to fetch
* @status: transfer status flags (reserved/loaded/done/overrun)
*/
struct atmel_hlcdc_dma_channel_dscr {
dma_addr_t addr;
u32 ctrl;
dma_addr_t next;
u32 status;
} __aligned(sizeof(u64));
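/*
 * Note on chaining: atmel_hlcdc_layer_dma_init() points each
 * descriptor's @next field at the descriptor's own DMA address, so a
 * queued descriptor makes the hardware loop on the same buffer until a
 * new chain is programmed (channel start or A2Q).
 */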
/**
* Atmel HLCDC layer types
*/
enum atmel_hlcdc_layer_type {
ATMEL_HLCDC_BASE_LAYER,
ATMEL_HLCDC_OVERLAY_LAYER,
ATMEL_HLCDC_CURSOR_LAYER,
ATMEL_HLCDC_PP_LAYER,
};
/**
* Atmel HLCDC Supported formats structure
*
* This structure lists all the formats supported by a given layer.
*
* @nformats: number of supported formats
* @formats: supported formats
*/
struct atmel_hlcdc_formats {
int nformats;
uint32_t *formats;
};
/**
* Atmel HLCDC Layer description structure
*
* This structure describes the capabilities provided by a given layer.
*
* @name: layer name
* @type: layer type
* @id: layer id
* @regs_offset: offset of the layer registers from the HLCDC registers base
* @nconfigs: number of config registers provided by this layer
* @formats: supported formats
* @layout: config registers layout
* @max_width: maximum width supported by this layer (0 means unlimited)
* @max_height: maximum height supported by this layer (0 means unlimited)
*/
struct atmel_hlcdc_layer_desc {
const char *name;
enum atmel_hlcdc_layer_type type;
int id;
int regs_offset;
int nconfigs;
struct atmel_hlcdc_formats *formats;
struct atmel_hlcdc_layer_cfg_layout layout;
int max_width;
int max_height;
};
/**
* Atmel HLCDC Layer Update Slot structure
*
* This structure stores layer update requests to be applied on the next frame.
* This is the base structure behind the atomic layer update infrastructure.
*
* Atomic layer update provides a way to update all of a layer's parameters
* simultaneously. This is needed to avoid incompatible sequential updates
* like this one:
* 1) update layer format from RGB888 (1 plane/buffer) to YUV422
* (2 planes/buffers)
* 2) the format update is applied but the DMA channel for the second
* plane/buffer is not enabled
* 3) enable the DMA channel for the second plane
*
* @fb_flip: fb_flip object
* @updated_configs: bitmask used to record modified configs
* @configs: new config values
*/
struct atmel_hlcdc_layer_update_slot {
struct atmel_hlcdc_layer_fb_flip *fb_flip;
unsigned long *updated_configs;
u32 *configs;
};
/**
* Atmel HLCDC Layer Update structure
*
* This structure provides a way to queue layer update requests.
*
* At a given time there is at most:
* - one pending update request, which means the update request has been
* committed (or validated) and is waiting for the DMA channel(s) to be
* available
* - one request being prepared, which means someone started a layer update
* but has not committed it yet. There cannot be more than one started
* request, because the update lock is taken when starting a layer update
* and released when committing or rolling back the request.
*
* @slots: update slots. One is used for pending request and the other one
* for started update request
* @pending: the pending slot index or -1 if no request is pending
* @next: the started update slot index or -1 if no update has been started
*/
struct atmel_hlcdc_layer_update {
struct atmel_hlcdc_layer_update_slot slots[2];
int pending;
int next;
};
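/*
 * A minimal usage sketch of the update API (illustrative only; error
 * handling elided, and "layer", "fb" and "offsets" are assumed to be
 * set up by the caller):
 *
 *	if (atmel_hlcdc_layer_update_start(layer))
 *		return;
 *	atmel_hlcdc_layer_update_cfg(layer, cfg, mask, val);
 *	atmel_hlcdc_layer_update_set_fb(layer, fb, offsets);
 *	atmel_hlcdc_layer_update_commit(layer);
 *
 * or call atmel_hlcdc_layer_update_rollback() instead of the commit to
 * drop the prepared request.
 */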
enum atmel_hlcdc_layer_dma_channel_status {
ATMEL_HLCDC_LAYER_DISABLED,
ATMEL_HLCDC_LAYER_ENABLED,
ATMEL_HLCDC_LAYER_DISABLING,
};
/**
* Atmel HLCDC Layer DMA channel structure
*
* This structure stores information on the DMA channel associated to a
* given layer.
*
* @status: DMA channel status
* @cur: current framebuffer
* @queue: next framebuffer
* @dscrs: allocated DMA descriptors
*/
struct atmel_hlcdc_layer_dma_channel {
enum atmel_hlcdc_layer_dma_channel_status status;
struct atmel_hlcdc_layer_fb_flip *cur;
struct atmel_hlcdc_layer_fb_flip *queue;
struct atmel_hlcdc_dma_channel_dscr *dscrs;
};
/**
* Atmel HLCDC Layer structure
*
* This structure stores information on the layer instance.
*
* @desc: layer description
* @max_planes: maximum planes/buffers that can be associated with this layer.
* This depends on the supported formats.
* @hlcdc: pointer to the atmel_hlcdc structure provided by the MFD device
* @wq: workqueue used to run the framebuffer flip cleanup works
* @dma: dma channel
* @gc: fb flip garbage collector
* @update: update handler
* @lock: layer lock
*/
struct atmel_hlcdc_layer {
const struct atmel_hlcdc_layer_desc *desc;
int max_planes;
struct atmel_hlcdc *hlcdc;
struct workqueue_struct *wq;
struct drm_flip_work gc;
struct atmel_hlcdc_layer_dma_channel dma;
struct atmel_hlcdc_layer_update update;
spinlock_t lock;
};
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer);
int atmel_hlcdc_layer_init(struct drm_device *dev,
struct atmel_hlcdc_layer *layer,
const struct atmel_hlcdc_layer_desc *desc);
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
struct atmel_hlcdc_layer *layer);
int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer);
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer);
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
u32 mask, u32 val);
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
struct drm_framebuffer *fb,
unsigned int *offsets);
void atmel_hlcdc_layer_update_set_finished(struct atmel_hlcdc_layer *layer,
void (*finished)(void *data),
void *finished_data);
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer);
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer);
#endif /* DRM_ATMEL_HLCDC_LAYER_H */


@ -0,0 +1,319 @@
/*
* Copyright (C) 2014 Traphandler
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Jean-Jacques Hiblot <jjhiblot@traphandler.com>
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_panel.h>
#include "atmel_hlcdc_dc.h"
/**
* Atmel HLCDC RGB output mode
*/
enum atmel_hlcdc_connector_rgb_mode {
ATMEL_HLCDC_CONNECTOR_RGB444,
ATMEL_HLCDC_CONNECTOR_RGB565,
ATMEL_HLCDC_CONNECTOR_RGB666,
ATMEL_HLCDC_CONNECTOR_RGB888,
};
/**
* Atmel HLCDC RGB connector structure
*
* This structure stores RGB slave device information.
*
* @connector: DRM connector
* @encoder: DRM encoder
* @dc: pointer to the atmel_hlcdc_dc structure
* @dpms: current DPMS mode
*/
struct atmel_hlcdc_rgb_output {
struct drm_connector connector;
struct drm_encoder encoder;
struct atmel_hlcdc_dc *dc;
int dpms;
};
static inline struct atmel_hlcdc_rgb_output *
drm_connector_to_atmel_hlcdc_rgb_output(struct drm_connector *connector)
{
return container_of(connector, struct atmel_hlcdc_rgb_output,
connector);
}
static inline struct atmel_hlcdc_rgb_output *
drm_encoder_to_atmel_hlcdc_rgb_output(struct drm_encoder *encoder)
{
return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
}
/**
* Atmel HLCDC Panel device structure
*
* This structure is a specialization of the slave device structure used
* to interface with DRM panels.
*
* @base: base slave device fields
* @panel: drm panel attached to this slave device
*/
struct atmel_hlcdc_panel {
struct atmel_hlcdc_rgb_output base;
struct drm_panel *panel;
};
static inline struct atmel_hlcdc_panel *
atmel_hlcdc_rgb_output_to_panel(struct atmel_hlcdc_rgb_output *output)
{
return container_of(output, struct atmel_hlcdc_panel, base);
}
static void atmel_hlcdc_panel_encoder_dpms(struct drm_encoder *encoder,
int mode)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
if (mode == rgb->dpms)
return;
if (mode != DRM_MODE_DPMS_ON)
drm_panel_disable(panel->panel);
else
drm_panel_enable(panel->panel);
rgb->dpms = mode;
}
static bool
atmel_hlcdc_panel_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted)
{
return true;
}
static void atmel_hlcdc_panel_encoder_prepare(struct drm_encoder *encoder)
{
atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void atmel_hlcdc_panel_encoder_commit(struct drm_encoder *encoder)
{
atmel_hlcdc_panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
}
static void
atmel_hlcdc_rgb_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_encoder_to_atmel_hlcdc_rgb_output(encoder);
struct drm_display_info *info = &rgb->connector.display_info;
unsigned int cfg;
cfg = 0;
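/*
 * The RGB output mode is programmed into the output configuration
 * register (CFG(5)); the << 8 shift below places the enum value in what
 * is presumably the MODE field covered by ATMEL_HLCDC_MODE_MASK.
 */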
if (info->num_bus_formats) {
switch (info->bus_formats[0]) {
case MEDIA_BUS_FMT_RGB666_1X18:
cfg |= ATMEL_HLCDC_CONNECTOR_RGB666 << 8;
break;
case MEDIA_BUS_FMT_RGB888_1X24:
cfg |= ATMEL_HLCDC_CONNECTOR_RGB888 << 8;
break;
default:
break;
}
}
regmap_update_bits(rgb->dc->hlcdc->regmap, ATMEL_HLCDC_CFG(5),
ATMEL_HLCDC_MODE_MASK,
cfg);
}
static struct drm_encoder_helper_funcs atmel_hlcdc_panel_encoder_helper_funcs = {
.dpms = atmel_hlcdc_panel_encoder_dpms,
.mode_fixup = atmel_hlcdc_panel_encoder_mode_fixup,
.prepare = atmel_hlcdc_panel_encoder_prepare,
.commit = atmel_hlcdc_panel_encoder_commit,
.mode_set = atmel_hlcdc_rgb_encoder_mode_set,
};
static void atmel_hlcdc_rgb_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
memset(encoder, 0, sizeof(*encoder));
}
static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
.destroy = atmel_hlcdc_rgb_encoder_destroy,
};
static int atmel_hlcdc_panel_get_modes(struct drm_connector *connector)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
return panel->panel->funcs->get_modes(panel->panel);
}
static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
}
static struct drm_encoder *
atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
return &rgb->encoder;
}
static struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
.get_modes = atmel_hlcdc_panel_get_modes,
.mode_valid = atmel_hlcdc_rgb_mode_valid,
.best_encoder = atmel_hlcdc_rgb_best_encoder,
};
static enum drm_connector_status
atmel_hlcdc_panel_connector_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static void
atmel_hlcdc_panel_connector_destroy(struct drm_connector *connector)
{
struct atmel_hlcdc_rgb_output *rgb =
drm_connector_to_atmel_hlcdc_rgb_output(connector);
struct atmel_hlcdc_panel *panel = atmel_hlcdc_rgb_output_to_panel(rgb);
drm_panel_detach(panel->panel);
drm_connector_cleanup(connector);
}
static const struct drm_connector_funcs atmel_hlcdc_panel_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = atmel_hlcdc_panel_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = atmel_hlcdc_panel_connector_destroy,
};
static int atmel_hlcdc_create_panel_output(struct drm_device *dev,
struct of_endpoint *ep)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct device_node *np;
struct drm_panel *p = NULL;
struct atmel_hlcdc_panel *panel;
int ret;
np = of_graph_get_remote_port_parent(ep->local_node);
if (!np)
return -EINVAL;
p = of_drm_find_panel(np);
of_node_put(np);
if (!p)
return -EPROBE_DEFER;
panel = devm_kzalloc(dev->dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
return -ENOMEM;
panel->base.dpms = DRM_MODE_DPMS_OFF;
panel->base.dc = dc;
drm_encoder_helper_add(&panel->base.encoder,
&atmel_hlcdc_panel_encoder_helper_funcs);
ret = drm_encoder_init(dev, &panel->base.encoder,
&atmel_hlcdc_panel_encoder_funcs,
DRM_MODE_ENCODER_LVDS);
if (ret)
return ret;
panel->base.connector.dpms = DRM_MODE_DPMS_OFF;
panel->base.connector.polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_helper_add(&panel->base.connector,
&atmel_hlcdc_panel_connector_helper_funcs);
ret = drm_connector_init(dev, &panel->base.connector,
&atmel_hlcdc_panel_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
if (ret)
goto err_encoder_cleanup;
drm_mode_connector_attach_encoder(&panel->base.connector,
&panel->base.encoder);
panel->base.encoder.possible_crtcs = 0x1;
drm_panel_attach(p, &panel->base.connector);
panel->panel = p;
return 0;
err_encoder_cleanup:
drm_encoder_cleanup(&panel->base.encoder);
return ret;
}
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
struct device_node *port_np, *np;
struct of_endpoint ep;
int ret;
port_np = of_get_child_by_name(dev->dev->of_node, "port");
if (!port_np)
return -EINVAL;
np = of_get_child_by_name(port_np, "endpoint");
of_node_put(port_np);
if (!np)
return -EINVAL;
ret = of_graph_parse_endpoint(np, &ep);
of_node_put(np);
if (ret)
return ret;
/* We currently only support panel output */
return atmel_hlcdc_create_panel_output(dev, &ep);
}


@ -0,0 +1,856 @@
/*
* Copyright (C) 2014 Free Electrons
* Copyright (C) 2014 Atmel
*
* Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "atmel_hlcdc_dc.h"
#define SUBPIXEL_MASK 0xffff
static uint32_t rgb_formats[] = {
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
};
struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats = {
.formats = rgb_formats,
.nformats = ARRAY_SIZE(rgb_formats),
};
static uint32_t rgb_and_yuv_formats[] = {
DRM_FORMAT_XRGB4444,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_ARGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_AYUV,
DRM_FORMAT_YUYV,
DRM_FORMAT_UYVY,
DRM_FORMAT_YVYU,
DRM_FORMAT_VYUY,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
DRM_FORMAT_YUV422,
DRM_FORMAT_YUV420,
};
struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_and_yuv_formats = {
.formats = rgb_and_yuv_formats,
.nformats = ARRAY_SIZE(rgb_and_yuv_formats),
};
static int atmel_hlcdc_format_to_plane_mode(u32 format, u32 *mode)
{
switch (format) {
case DRM_FORMAT_XRGB4444:
*mode = ATMEL_HLCDC_XRGB4444_MODE;
break;
case DRM_FORMAT_ARGB4444:
*mode = ATMEL_HLCDC_ARGB4444_MODE;
break;
case DRM_FORMAT_RGBA4444:
*mode = ATMEL_HLCDC_RGBA4444_MODE;
break;
case DRM_FORMAT_RGB565:
*mode = ATMEL_HLCDC_RGB565_MODE;
break;
case DRM_FORMAT_RGB888:
*mode = ATMEL_HLCDC_RGB888_MODE;
break;
case DRM_FORMAT_ARGB1555:
*mode = ATMEL_HLCDC_ARGB1555_MODE;
break;
case DRM_FORMAT_XRGB8888:
*mode = ATMEL_HLCDC_XRGB8888_MODE;
break;
case DRM_FORMAT_ARGB8888:
*mode = ATMEL_HLCDC_ARGB8888_MODE;
break;
case DRM_FORMAT_RGBA8888:
*mode = ATMEL_HLCDC_RGBA8888_MODE;
break;
case DRM_FORMAT_AYUV:
*mode = ATMEL_HLCDC_AYUV_MODE;
break;
case DRM_FORMAT_YUYV:
*mode = ATMEL_HLCDC_YUYV_MODE;
break;
case DRM_FORMAT_UYVY:
*mode = ATMEL_HLCDC_UYVY_MODE;
break;
case DRM_FORMAT_YVYU:
*mode = ATMEL_HLCDC_YVYU_MODE;
break;
case DRM_FORMAT_VYUY:
*mode = ATMEL_HLCDC_VYUY_MODE;
break;
case DRM_FORMAT_NV21:
*mode = ATMEL_HLCDC_NV21_MODE;
break;
case DRM_FORMAT_NV61:
*mode = ATMEL_HLCDC_NV61_MODE;
break;
case DRM_FORMAT_YUV420:
*mode = ATMEL_HLCDC_YUV420_MODE;
break;
case DRM_FORMAT_YUV422:
*mode = ATMEL_HLCDC_YUV422_MODE;
break;
default:
return -ENOTSUPP;
}
return 0;
}
static bool atmel_hlcdc_format_embedds_alpha(u32 format)
{
int i;
for (i = 0; i < sizeof(format); i++) {
char tmp = (format >> (8 * i)) & 0xff;
if (tmp == 'A')
return true;
}
return false;
}
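/*
 * The check above relies on the DRM fourcc encoding: formats carrying
 * an alpha channel spell it out in their fourcc code, e.g.
 * DRM_FORMAT_ARGB8888 is fourcc_code('A', 'R', '2', '4') while
 * DRM_FORMAT_XRGB8888 is fourcc_code('X', 'R', '2', '4'). This holds
 * for every format listed in this driver.
 */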
static u32 heo_downscaling_xcoef[] = {
0x11343311,
0x000000f7,
0x1635300c,
0x000000f9,
0x1b362c08,
0x000000fb,
0x1f372804,
0x000000fe,
0x24382400,
0x00000000,
0x28371ffe,
0x00000004,
0x2c361bfb,
0x00000008,
0x303516f9,
0x0000000c,
};
static u32 heo_downscaling_ycoef[] = {
0x00123737,
0x00173732,
0x001b382d,
0x001f3928,
0x00243824,
0x0028391f,
0x002d381b,
0x00323717,
};
static u32 heo_upscaling_xcoef[] = {
0xf74949f7,
0x00000000,
0xf55f33fb,
0x000000fe,
0xf5701efe,
0x000000ff,
0xf87c0dff,
0x00000000,
0x00800000,
0x00000000,
0x0d7cf800,
0x000000ff,
0x1e70f5ff,
0x000000fe,
0x335ff5fe,
0x000000fb,
};
static u32 heo_upscaling_ycoef[] = {
0x00004040,
0x00075920,
0x00056f0c,
0x00027b03,
0x00008000,
0x00037b02,
0x000c6f05,
0x00205907,
};
static void
atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_update_req *req)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
if (layout->size)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->size,
0xffffffff,
(req->crtc_w - 1) |
((req->crtc_h - 1) << 16));
if (layout->memsize)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->memsize,
0xffffffff,
(req->src_w - 1) |
((req->src_h - 1) << 16));
if (layout->pos)
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pos,
0xffffffff,
req->crtc_x |
(req->crtc_y << 16));
/* TODO: rework the rescaling part */
if (req->crtc_w != req->src_w || req->crtc_h != req->src_h) {
u32 factor_reg = 0;
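/*
 * The factors computed below appear to be 1/2048 fixed-point ratios of
 * source size to displayed size (2048 == 8 * 256): the factor is
 * rounded up from (2048 * src - 1024) / crtc, then decremented if the
 * implied memory fetch size would exceed the source size. The X factor
 * lives in the low half of the register, the Y factor in the high
 * half, and bit 31 appears to act as the scaler enable.
 */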
if (req->crtc_w != req->src_w) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_xcoef;
u32 max_memsize;
if (req->crtc_w < req->src_w)
coeff_tab = heo_downscaling_xcoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_xcoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
17 + i,
0xffffffff,
coeff_tab[i]);
factor = ((8 * 256 * req->src_w) - (256 * 4)) /
req->crtc_w;
factor++;
max_memsize = ((factor * req->crtc_w) + (256 * 4)) /
2048;
if (max_memsize > req->src_w)
factor--;
factor_reg |= factor | 0x80000000;
}
if (req->crtc_h != req->src_h) {
int i;
u32 factor;
u32 *coeff_tab = heo_upscaling_ycoef;
u32 max_memsize;
if (req->crtc_h < req->src_h)
coeff_tab = heo_downscaling_ycoef;
for (i = 0; i < ARRAY_SIZE(heo_upscaling_ycoef); i++)
atmel_hlcdc_layer_update_cfg(&plane->layer,
33 + i,
0xffffffff,
coeff_tab[i]);
factor = ((8 * 256 * req->src_h) - (256 * 4)) /
req->crtc_h;
factor++;
max_memsize = ((factor * req->crtc_h) + (256 * 4)) /
2048;
if (max_memsize > req->src_h)
factor--;
factor_reg |= (factor << 16) | 0x80000000;
}
atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
factor_reg);
}
}
static void
atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_update_req *req)
{
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
unsigned int cfg = ATMEL_HLCDC_LAYER_DMA;
if (plane->base.type != DRM_PLANE_TYPE_PRIMARY) {
cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER;
if (atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format))
cfg |= ATMEL_HLCDC_LAYER_LAEN;
else
cfg |= ATMEL_HLCDC_LAYER_GAEN;
}
atmel_hlcdc_layer_update_cfg(&plane->layer,
ATMEL_HLCDC_LAYER_DMA_CFG_ID,
ATMEL_HLCDC_LAYER_DMA_BLEN_MASK,
ATMEL_HLCDC_LAYER_DMA_BLEN_INCR16);
atmel_hlcdc_layer_update_cfg(&plane->layer, layout->general_config,
ATMEL_HLCDC_LAYER_ITER2BL |
ATMEL_HLCDC_LAYER_ITER |
ATMEL_HLCDC_LAYER_GAEN |
ATMEL_HLCDC_LAYER_LAEN |
ATMEL_HLCDC_LAYER_OVR |
ATMEL_HLCDC_LAYER_DMA, cfg);
}
static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_update_req *req)
{
u32 cfg;
int ret;
ret = atmel_hlcdc_format_to_plane_mode(req->fb->pixel_format, &cfg);
if (ret)
return;
if ((req->fb->pixel_format == DRM_FORMAT_YUV422 ||
req->fb->pixel_format == DRM_FORMAT_NV61) &&
(plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
ATMEL_HLCDC_LAYER_FORMAT_CFG_ID,
0xffffffff,
cfg);
/*
* The rotation memory-access optimization does not work on RGB888
* (rotation itself still works, just without the optimization), so
* disable it for this format.
*/
if (req->fb->pixel_format == DRM_FORMAT_RGB888)
cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS;
else
cfg = 0;
atmel_hlcdc_layer_update_cfg(&plane->layer,
ATMEL_HLCDC_LAYER_DMA_CFG_ID,
ATMEL_HLCDC_LAYER_DMA_ROTDIS, cfg);
}
static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
struct atmel_hlcdc_plane_update_req *req)
{
struct atmel_hlcdc_layer *layer = &plane->layer;
const struct atmel_hlcdc_layer_cfg_layout *layout =
&layer->desc->layout;
int i;
atmel_hlcdc_layer_update_set_fb(&plane->layer, req->fb, req->offsets);
for (i = 0; i < req->nplanes; i++) {
if (layout->xstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->xstride[i],
0xffffffff,
req->xstride[i]);
}
if (layout->pstride[i]) {
atmel_hlcdc_layer_update_cfg(&plane->layer,
layout->pstride[i],
0xffffffff,
req->pstride[i]);
}
}
}
static int atmel_hlcdc_plane_check_update_req(struct drm_plane *p,
struct atmel_hlcdc_plane_update_req *req,
const struct drm_display_mode *mode)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
const struct atmel_hlcdc_layer_cfg_layout *layout =
&plane->layer.desc->layout;
if (!layout->size &&
(mode->hdisplay != req->crtc_w ||
mode->vdisplay != req->crtc_h))
return -EINVAL;
if (plane->layer.desc->max_height &&
req->crtc_h > plane->layer.desc->max_height)
return -EINVAL;
if (plane->layer.desc->max_width &&
req->crtc_w > plane->layer.desc->max_width)
return -EINVAL;
if ((req->crtc_h != req->src_h || req->crtc_w != req->src_w) &&
(!layout->memsize ||
atmel_hlcdc_format_embedds_alpha(req->fb->pixel_format)))
return -EINVAL;
if (req->crtc_x < 0 || req->crtc_y < 0)
return -EINVAL;
if (req->crtc_w + req->crtc_x > mode->hdisplay ||
req->crtc_h + req->crtc_y > mode->vdisplay)
return -EINVAL;
return 0;
}
int atmel_hlcdc_plane_prepare_update_req(struct drm_plane *p,
struct atmel_hlcdc_plane_update_req *req,
const struct drm_display_mode *mode)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
unsigned int patched_crtc_w;
unsigned int patched_crtc_h;
unsigned int patched_src_w;
unsigned int patched_src_h;
unsigned int tmp;
int x_offset = 0;
int y_offset = 0;
int hsub = 1;
int vsub = 1;
int i;
if ((req->src_x | req->src_y | req->src_w | req->src_h) &
SUBPIXEL_MASK)
return -EINVAL;
req->src_x >>= 16;
req->src_y >>= 16;
req->src_w >>= 16;
req->src_h >>= 16;
req->nplanes = drm_format_num_planes(req->fb->pixel_format);
if (req->nplanes > ATMEL_HLCDC_MAX_PLANES)
return -EINVAL;
/*
* Swap width and height in case of a 90 or 270 degrees rotation
*/
if (plane->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
tmp = req->crtc_w;
req->crtc_w = req->crtc_h;
req->crtc_h = tmp;
tmp = req->src_w;
req->src_w = req->src_h;
req->src_h = tmp;
}
if (req->crtc_x + req->crtc_w > mode->hdisplay)
patched_crtc_w = mode->hdisplay - req->crtc_x;
else
patched_crtc_w = req->crtc_w;
if (req->crtc_x < 0) {
patched_crtc_w += req->crtc_x;
x_offset = -req->crtc_x;
req->crtc_x = 0;
}
if (req->crtc_y + req->crtc_h > mode->vdisplay)
patched_crtc_h = mode->vdisplay - req->crtc_y;
else
patched_crtc_h = req->crtc_h;
if (req->crtc_y < 0) {
patched_crtc_h += req->crtc_y;
y_offset = -req->crtc_y;
req->crtc_y = 0;
}
patched_src_w = DIV_ROUND_CLOSEST(patched_crtc_w * req->src_w,
req->crtc_w);
patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * req->src_h,
req->crtc_h);
hsub = drm_format_horz_chroma_subsampling(req->fb->pixel_format);
vsub = drm_format_vert_chroma_subsampling(req->fb->pixel_format);
for (i = 0; i < req->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? hsub : 1;
int ydiv = i ? vsub : 1;
req->bpp[i] = drm_format_plane_cpp(req->fb->pixel_format, i);
if (!req->bpp[i])
return -EINVAL;
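/*
 * Stride derivation sketch (assuming the DMA engine fetches one pixel,
 * advances bpp bytes, then adds pstride between consecutive pixels and
 * xstride at each end of line): for a 90 degrees rotation, for
 * instance, pstride = -pitch - bpp yields a net step of -pitch per
 * pixel, so an output line walks a source column bottom-up, i.e. a
 * rotated scan order.
 */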
switch (plane->rotation & 0xf) {
case BIT(DRM_ROTATE_90):
offset = ((y_offset + req->src_y + patched_src_w - 1) /
ydiv) * req->fb->pitches[i];
offset += ((x_offset + req->src_x) / xdiv) *
req->bpp[i];
req->xstride[i] = ((patched_src_w - 1) / ydiv) *
req->fb->pitches[i];
req->pstride[i] = -req->fb->pitches[i] - req->bpp[i];
break;
case BIT(DRM_ROTATE_180):
offset = ((y_offset + req->src_y + patched_src_h - 1) /
ydiv) * req->fb->pitches[i];
offset += ((x_offset + req->src_x + patched_src_w - 1) /
xdiv) * req->bpp[i];
req->xstride[i] = ((((patched_src_w - 1) / xdiv) - 1) *
req->bpp[i]) - req->fb->pitches[i];
req->pstride[i] = -2 * req->bpp[i];
break;
case BIT(DRM_ROTATE_270):
offset = ((y_offset + req->src_y) / ydiv) *
req->fb->pitches[i];
offset += ((x_offset + req->src_x + patched_src_h - 1) /
xdiv) * req->bpp[i];
req->xstride[i] = -(((patched_src_w - 1) / ydiv) *
req->fb->pitches[i]) -
(2 * req->bpp[i]);
req->pstride[i] = req->fb->pitches[i] - req->bpp[i];
break;
case BIT(DRM_ROTATE_0):
default:
offset = ((y_offset + req->src_y) / ydiv) *
req->fb->pitches[i];
offset += ((x_offset + req->src_x) / xdiv) *
req->bpp[i];
req->xstride[i] = req->fb->pitches[i] -
((patched_src_w / xdiv) *
req->bpp[i]);
req->pstride[i] = 0;
break;
}
req->offsets[i] = offset + req->fb->offsets[i];
}
req->src_w = patched_src_w;
req->src_h = patched_src_h;
req->crtc_w = patched_crtc_w;
req->crtc_h = patched_crtc_h;
return atmel_hlcdc_plane_check_update_req(p, req, mode);
}
int atmel_hlcdc_plane_apply_update_req(struct drm_plane *p,
struct atmel_hlcdc_plane_update_req *req)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
int ret;
ret = atmel_hlcdc_layer_update_start(&plane->layer);
if (ret)
return ret;
atmel_hlcdc_plane_update_pos_and_size(plane, req);
atmel_hlcdc_plane_update_general_settings(plane, req);
atmel_hlcdc_plane_update_format(plane, req);
atmel_hlcdc_plane_update_buffers(plane, req);
atmel_hlcdc_layer_update_commit(&plane->layer);
return 0;
}
int atmel_hlcdc_plane_update_with_mode(struct drm_plane *p,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w,
unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h,
const struct drm_display_mode *mode)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_update_req req;
int ret = 0;
memset(&req, 0, sizeof(req));
req.crtc_x = crtc_x;
req.crtc_y = crtc_y;
req.crtc_w = crtc_w;
req.crtc_h = crtc_h;
req.src_x = src_x;
req.src_y = src_y;
req.src_w = src_w;
req.src_h = src_h;
req.fb = fb;
ret = atmel_hlcdc_plane_prepare_update_req(&plane->base, &req, mode);
if (ret)
return ret;
if (!req.crtc_h || !req.crtc_w)
return atmel_hlcdc_layer_disable(&plane->layer);
return atmel_hlcdc_plane_apply_update_req(&plane->base, &req);
}
static int atmel_hlcdc_plane_update(struct drm_plane *p,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
uint32_t src_w, uint32_t src_h)
{
return atmel_hlcdc_plane_update_with_mode(p, crtc, fb, crtc_x, crtc_y,
crtc_w, crtc_h, src_x, src_y,
src_w, src_h, &crtc->hwmode);
}
static int atmel_hlcdc_plane_disable(struct drm_plane *p)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
return atmel_hlcdc_layer_disable(&plane->layer);
}
static void atmel_hlcdc_plane_destroy(struct drm_plane *p)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
if (plane->base.fb)
drm_framebuffer_unreference(plane->base.fb);
atmel_hlcdc_layer_cleanup(p->dev, &plane->layer);
drm_plane_cleanup(p);
devm_kfree(p->dev->dev, plane);
}
static int atmel_hlcdc_plane_set_alpha(struct atmel_hlcdc_plane *plane,
u8 alpha)
{
atmel_hlcdc_layer_update_start(&plane->layer);
atmel_hlcdc_layer_update_cfg(&plane->layer,
plane->layer.desc->layout.general_config,
ATMEL_HLCDC_LAYER_GA_MASK,
alpha << ATMEL_HLCDC_LAYER_GA_SHIFT);
atmel_hlcdc_layer_update_commit(&plane->layer);
return 0;
}
static int atmel_hlcdc_plane_set_rotation(struct atmel_hlcdc_plane *plane,
unsigned int rotation)
{
plane->rotation = rotation;
return 0;
}
static int atmel_hlcdc_plane_set_property(struct drm_plane *p,
struct drm_property *property,
uint64_t value)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_properties *props = plane->properties;
if (property == props->alpha)
atmel_hlcdc_plane_set_alpha(plane, value);
else if (property == props->rotation)
atmel_hlcdc_plane_set_rotation(plane, value);
else
return -EINVAL;
return 0;
}
static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc,
struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
if (desc->type == ATMEL_HLCDC_OVERLAY_LAYER ||
desc->type == ATMEL_HLCDC_CURSOR_LAYER) {
drm_object_attach_property(&plane->base.base,
props->alpha, 255);
/* Set default alpha value */
regmap_update_bits(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_GENERAL_CFG(&plane->layer),
ATMEL_HLCDC_LAYER_GA_MASK,
ATMEL_HLCDC_LAYER_GA_MASK);
}
if (desc->layout.xstride && desc->layout.pstride)
drm_object_attach_property(&plane->base.base,
props->rotation,
BIT(DRM_ROTATE_0));
if (desc->layout.csc) {
/*
* TODO: declare a "yuv-to-rgb-conv-factors" property to let
* userspace modify these factors (using a BLOB property?).
*/
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 0),
0x4c900091);
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 1),
0x7a5f5090);
regmap_write(regmap,
desc->regs_offset +
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
}
static struct drm_plane_funcs layer_plane_funcs = {
.update_plane = atmel_hlcdc_plane_update,
.disable_plane = atmel_hlcdc_plane_disable,
.set_property = atmel_hlcdc_plane_set_property,
.destroy = atmel_hlcdc_plane_destroy,
};
static struct atmel_hlcdc_plane *
atmel_hlcdc_plane_create(struct drm_device *dev,
const struct atmel_hlcdc_layer_desc *desc,
struct atmel_hlcdc_plane_properties *props)
{
struct atmel_hlcdc_plane *plane;
enum drm_plane_type type;
int ret;
plane = devm_kzalloc(dev->dev, sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
ret = atmel_hlcdc_layer_init(dev, &plane->layer, desc);
if (ret)
return ERR_PTR(ret);
if (desc->type == ATMEL_HLCDC_BASE_LAYER)
type = DRM_PLANE_TYPE_PRIMARY;
else if (desc->type == ATMEL_HLCDC_CURSOR_LAYER)
type = DRM_PLANE_TYPE_CURSOR;
else
type = DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, &plane->base, 0,
&layer_plane_funcs,
desc->formats->formats,
desc->formats->nformats, type);
if (ret)
return ERR_PTR(ret);
/* Set default property values */
atmel_hlcdc_plane_init_properties(plane, desc, props);
return plane;
}
static struct atmel_hlcdc_plane_properties *
atmel_hlcdc_plane_create_properties(struct drm_device *dev)
{
struct atmel_hlcdc_plane_properties *props;
props = devm_kzalloc(dev->dev, sizeof(*props), GFP_KERNEL);
if (!props)
return ERR_PTR(-ENOMEM);
props->alpha = drm_property_create_range(dev, 0, "alpha", 0, 255);
if (!props->alpha)
return ERR_PTR(-ENOMEM);
props->rotation = drm_mode_create_rotation_property(dev,
BIT(DRM_ROTATE_0) |
BIT(DRM_ROTATE_90) |
BIT(DRM_ROTATE_180) |
BIT(DRM_ROTATE_270));
if (!props->rotation)
return ERR_PTR(-ENOMEM);
return props;
}
struct atmel_hlcdc_planes *
atmel_hlcdc_create_planes(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_plane_properties *props;
struct atmel_hlcdc_planes *planes;
const struct atmel_hlcdc_layer_desc *descs = dc->desc->layers;
int nlayers = dc->desc->nlayers;
int i;
planes = devm_kzalloc(dev->dev, sizeof(*planes), GFP_KERNEL);
if (!planes)
return ERR_PTR(-ENOMEM);
for (i = 0; i < nlayers; i++) {
if (descs[i].type == ATMEL_HLCDC_OVERLAY_LAYER)
planes->noverlays++;
}
if (planes->noverlays) {
planes->overlays = devm_kzalloc(dev->dev,
planes->noverlays *
sizeof(*planes->overlays),
GFP_KERNEL);
if (!planes->overlays)
return ERR_PTR(-ENOMEM);
}
props = atmel_hlcdc_plane_create_properties(dev);
if (IS_ERR(props))
return ERR_CAST(props);
planes->noverlays = 0;
for (i = 0; i < nlayers; i++) {
struct atmel_hlcdc_plane *plane;
if (descs[i].type == ATMEL_HLCDC_PP_LAYER)
continue;
plane = atmel_hlcdc_plane_create(dev, &descs[i], props);
if (IS_ERR(plane))
return ERR_CAST(plane);
plane->properties = props;
switch (descs[i].type) {
case ATMEL_HLCDC_BASE_LAYER:
if (planes->primary)
return ERR_PTR(-EINVAL);
planes->primary = plane;
break;
case ATMEL_HLCDC_OVERLAY_LAYER:
planes->overlays[planes->noverlays++] = plane;
break;
case ATMEL_HLCDC_CURSOR_LAYER:
if (planes->cursor)
return ERR_PTR(-EINVAL);
planes->cursor = plane;
break;
default:
break;
}
}
return planes;
}


@ -207,12 +207,22 @@ int bochs_fbdev_init(struct bochs_device *bochs)
if (ret)
return ret;
drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
ret = drm_fb_helper_single_add_all_connectors(&bochs->fb.helper);
if (ret)
goto fini;
drm_helper_disable_unused_functions(bochs->dev);
drm_fb_helper_initial_config(&bochs->fb.helper, 32);
ret = drm_fb_helper_initial_config(&bochs->fb.helper, 32);
if (ret)
goto fini;
bochs->fb.initialized = true;
return 0;
fini:
drm_fb_helper_fini(&bochs->fb.helper);
return ret;
}
void bochs_fbdev_fini(struct bochs_device *bochs)


@ -18,10 +18,6 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
static void bochs_crtc_load_lut(struct drm_crtc *crtc)
{
}
static void bochs_crtc_dpms(struct drm_crtc *crtc, int mode)
{
switch (mode) {
@ -144,7 +140,6 @@ static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
.mode_set_base = bochs_crtc_mode_set_base,
.prepare = bochs_crtc_prepare,
.commit = bochs_crtc_commit,
.load_lut = bochs_crtc_load_lut,
};
static void bochs_crtc_init(struct drm_device *dev)


@ -1,5 +1,13 @@
config DRM_DW_HDMI
tristate
depends on DRM
select DRM_KMS_HELPER
config DRM_PTN3460
tristate "PTN3460 DP/LVDS bridge"
depends on DRM
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL
---help---
ptn3460 eDP-LVDS bridge chip driver.


@ -1,3 +1,4 @@
ccflags-y := -Iinclude/drm
obj-$(CONFIG_DRM_PTN3460) += ptn3460.o
obj-$(CONFIG_DRM_DW_HDMI) += dw_hdmi.o


@ -837,7 +837,8 @@ enum {
HDMI_PHY_CONF0_PDZ_OFFSET = 7,
HDMI_PHY_CONF0_ENTMDS_MASK = 0x40,
HDMI_PHY_CONF0_ENTMDS_OFFSET = 6,
HDMI_PHY_CONF0_SPARECTRL = 0x20,
HDMI_PHY_CONF0_SPARECTRL_MASK = 0x20,
HDMI_PHY_CONF0_SPARECTRL_OFFSET = 5,
HDMI_PHY_CONF0_GEN2_PDDQ_MASK = 0x10,
HDMI_PHY_CONF0_GEN2_PDDQ_OFFSET = 4,
HDMI_PHY_CONF0_GEN2_TXPWRON_MASK = 0x8,
@ -1029,4 +1030,5 @@ enum {
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_HIGH = 0x2,
HDMI_A_VIDPOLCFG_HSYNCPOL_ACTIVE_LOW = 0x0,
};
#endif /* __IMX_HDMI_H__ */


@ -13,20 +13,23 @@
* GNU General Public License for more details.
*/
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/of_graph.h>
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include <drm/drm_panel.h>
#include "bridge/ptn3460.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include "drmP.h"
#define PTN3460_EDID_ADDR 0x0
#define PTN3460_EDID_EMULATION_ADDR 0x84
#define PTN3460_EDID_ENABLE_EMULATION 0
@ -36,15 +39,27 @@
struct ptn3460_bridge {
struct drm_connector connector;
struct i2c_client *client;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct drm_bridge bridge;
struct edid *edid;
int gpio_pd_n;
int gpio_rst_n;
struct drm_panel *panel;
struct gpio_desc *gpio_pd_n;
struct gpio_desc *gpio_rst_n;
u32 edid_emulation;
bool enabled;
};
static inline struct ptn3460_bridge *
bridge_to_ptn3460(struct drm_bridge *bridge)
{
return container_of(bridge, struct ptn3460_bridge, bridge);
}
static inline struct ptn3460_bridge *
connector_to_ptn3460(struct drm_connector *connector)
{
return container_of(connector, struct ptn3460_bridge, connector);
}
static int ptn3460_read_bytes(struct ptn3460_bridge *ptn_bridge, char addr,
u8 *buf, int len)
{
@ -92,7 +107,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_SRAM_LOAD_ADDR,
ptn_bridge->edid_emulation);
if (ret) {
DRM_ERROR("Failed to transfer edid to sram, ret=%d\n", ret);
DRM_ERROR("Failed to transfer EDID to sram, ret=%d\n", ret);
return ret;
}
@ -102,7 +117,7 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
ret = ptn3460_write_byte(ptn_bridge, PTN3460_EDID_EMULATION_ADDR, val);
if (ret) {
DRM_ERROR("Failed to write edid value, ret=%d\n", ret);
DRM_ERROR("Failed to write EDID value, ret=%d\n", ret);
return ret;
}
@ -111,19 +126,21 @@ static int ptn3460_select_edid(struct ptn3460_bridge *ptn_bridge)
static void ptn3460_pre_enable(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
if (ptn_bridge->enabled)
return;
if (gpio_is_valid(ptn_bridge->gpio_pd_n))
gpio_set_value(ptn_bridge->gpio_pd_n, 1);
gpiod_set_value(ptn_bridge->gpio_pd_n, 1);
if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
gpio_set_value(ptn_bridge->gpio_rst_n, 0);
udelay(10);
gpio_set_value(ptn_bridge->gpio_rst_n, 1);
gpiod_set_value(ptn_bridge->gpio_rst_n, 0);
usleep_range(10, 20);
gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
if (drm_panel_prepare(ptn_bridge->panel)) {
DRM_ERROR("failed to prepare panel\n");
return;
}
/*
@ -135,73 +152,67 @@ static void ptn3460_pre_enable(struct drm_bridge *bridge)
ret = ptn3460_select_edid(ptn_bridge);
if (ret)
DRM_ERROR("Select edid failed ret=%d\n", ret);
DRM_ERROR("Select EDID failed ret=%d\n", ret);
ptn_bridge->enabled = true;
}
static void ptn3460_enable(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
if (drm_panel_enable(ptn_bridge->panel)) {
DRM_ERROR("failed to enable panel\n");
return;
}
}
static void ptn3460_disable(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
if (!ptn_bridge->enabled)
return;
ptn_bridge->enabled = false;
if (gpio_is_valid(ptn_bridge->gpio_rst_n))
gpio_set_value(ptn_bridge->gpio_rst_n, 1);
if (drm_panel_disable(ptn_bridge->panel)) {
DRM_ERROR("failed to disable panel\n");
return;
}
if (gpio_is_valid(ptn_bridge->gpio_pd_n))
gpio_set_value(ptn_bridge->gpio_pd_n, 0);
gpiod_set_value(ptn_bridge->gpio_rst_n, 1);
gpiod_set_value(ptn_bridge->gpio_pd_n, 0);
}
static void ptn3460_post_disable(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
if (drm_panel_unprepare(ptn_bridge->panel)) {
DRM_ERROR("failed to unprepare panel\n");
return;
}
}
void ptn3460_bridge_destroy(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge->driver_private;
drm_bridge_cleanup(bridge);
if (gpio_is_valid(ptn_bridge->gpio_pd_n))
gpio_free(ptn_bridge->gpio_pd_n);
if (gpio_is_valid(ptn_bridge->gpio_rst_n))
gpio_free(ptn_bridge->gpio_rst_n);
/* Nothing else to free, we've got devm allocated memory */
}
struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
.enable = ptn3460_enable,
.disable = ptn3460_disable,
.post_disable = ptn3460_post_disable,
.destroy = ptn3460_bridge_destroy,
};
int ptn3460_get_modes(struct drm_connector *connector)
static int ptn3460_get_modes(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge;
u8 *edid;
int ret, num_modes;
int ret, num_modes = 0;
bool power_off;
ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
ptn_bridge = connector_to_ptn3460(connector);
if (ptn_bridge->edid)
return drm_add_edid_modes(connector, ptn_bridge->edid);
power_off = !ptn_bridge->enabled;
ptn3460_pre_enable(ptn_bridge->bridge);
ptn3460_pre_enable(&ptn_bridge->bridge);
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
if (!edid) {
DRM_ERROR("Failed to allocate edid\n");
DRM_ERROR("Failed to allocate EDID\n");
return 0;
}
@ -209,7 +220,6 @@ int ptn3460_get_modes(struct drm_connector *connector)
EDID_LENGTH);
if (ret) {
kfree(edid);
num_modes = 0;
goto out;
}
@ -220,124 +230,188 @@ int ptn3460_get_modes(struct drm_connector *connector)
out:
if (power_off)
ptn3460_disable(ptn_bridge->bridge);
ptn3460_disable(&ptn_bridge->bridge);
return num_modes;
}
struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
{
struct ptn3460_bridge *ptn_bridge;
struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
ptn_bridge = container_of(connector, struct ptn3460_bridge, connector);
return ptn_bridge->encoder;
return ptn_bridge->bridge.encoder;
}
struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
static struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
.get_modes = ptn3460_get_modes,
.best_encoder = ptn3460_best_encoder,
};
enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
bool force)
{
return connector_status_connected;
}
void ptn3460_connector_destroy(struct drm_connector *connector)
static void ptn3460_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
}
struct drm_connector_funcs ptn3460_connector_funcs = {
static struct drm_connector_funcs ptn3460_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.detect = ptn3460_detect,
.destroy = ptn3460_connector_destroy,
};
int ptn3460_init(struct drm_device *dev, struct drm_encoder *encoder,
struct i2c_client *client, struct device_node *node)
int ptn3460_bridge_attach(struct drm_bridge *bridge)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
struct drm_bridge *bridge;
struct ptn3460_bridge *ptn_bridge;
bridge = devm_kzalloc(dev->dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge) {
DRM_ERROR("Failed to allocate drm bridge\n");
return -ENOMEM;
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
}
ptn_bridge = devm_kzalloc(dev->dev, sizeof(*ptn_bridge), GFP_KERNEL);
if (!ptn_bridge) {
DRM_ERROR("Failed to allocate ptn bridge\n");
return -ENOMEM;
}
ptn_bridge->client = client;
ptn_bridge->encoder = encoder;
ptn_bridge->bridge = bridge;
ptn_bridge->gpio_pd_n = of_get_named_gpio(node, "powerdown-gpio", 0);
if (gpio_is_valid(ptn_bridge->gpio_pd_n)) {
ret = gpio_request_one(ptn_bridge->gpio_pd_n,
GPIOF_OUT_INIT_HIGH, "PTN3460_PD_N");
if (ret) {
DRM_ERROR("Request powerdown-gpio failed (%d)\n", ret);
return ret;
}
}
ptn_bridge->gpio_rst_n = of_get_named_gpio(node, "reset-gpio", 0);
if (gpio_is_valid(ptn_bridge->gpio_rst_n)) {
/*
* Request the reset pin low to avoid the bridge being
* initialized prematurely
*/
ret = gpio_request_one(ptn_bridge->gpio_rst_n,
GPIOF_OUT_INIT_LOW, "PTN3460_RST_N");
if (ret) {
DRM_ERROR("Request reset-gpio failed (%d)\n", ret);
gpio_free(ptn_bridge->gpio_pd_n);
return ret;
}
}
ret = of_property_read_u32(node, "edid-emulation",
&ptn_bridge->edid_emulation);
if (ret) {
DRM_ERROR("Can't read edid emulation value\n");
goto err;
}
ret = drm_bridge_init(dev, bridge, &ptn3460_bridge_funcs);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
goto err;
}
bridge->driver_private = ptn_bridge;
encoder->bridge = bridge;
ret = drm_connector_init(dev, &ptn_bridge->connector,
ptn_bridge->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(bridge->dev, &ptn_bridge->connector,
&ptn3460_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
goto err;
return ret;
}
drm_connector_helper_add(&ptn_bridge->connector,
&ptn3460_connector_helper_funcs);
&ptn3460_connector_helper_funcs);
drm_connector_register(&ptn_bridge->connector);
drm_mode_connector_attach_encoder(&ptn_bridge->connector, encoder);
drm_mode_connector_attach_encoder(&ptn_bridge->connector,
bridge->encoder);
return 0;
if (ptn_bridge->panel)
drm_panel_attach(ptn_bridge->panel, &ptn_bridge->connector);
drm_helper_hpd_irq_event(ptn_bridge->connector.dev);
err:
if (gpio_is_valid(ptn_bridge->gpio_pd_n))
gpio_free(ptn_bridge->gpio_pd_n);
if (gpio_is_valid(ptn_bridge->gpio_rst_n))
gpio_free(ptn_bridge->gpio_rst_n);
return ret;
}
EXPORT_SYMBOL(ptn3460_init);
static struct drm_bridge_funcs ptn3460_bridge_funcs = {
.pre_enable = ptn3460_pre_enable,
.enable = ptn3460_enable,
.disable = ptn3460_disable,
.post_disable = ptn3460_post_disable,
.attach = ptn3460_bridge_attach,
};
static int ptn3460_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
struct device_node *endpoint, *panel_node;
int ret;
ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
if (!ptn_bridge) {
return -ENOMEM;
}
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (endpoint) {
panel_node = of_graph_get_remote_port_parent(endpoint);
if (panel_node) {
ptn_bridge->panel = of_drm_find_panel(panel_node);
of_node_put(panel_node);
if (!ptn_bridge->panel)
return -EPROBE_DEFER;
}
}
ptn_bridge->client = client;
ptn_bridge->gpio_pd_n = devm_gpiod_get(&client->dev, "powerdown");
if (IS_ERR(ptn_bridge->gpio_pd_n)) {
ret = PTR_ERR(ptn_bridge->gpio_pd_n);
dev_err(dev, "cannot get gpio_pd_n %d\n", ret);
return ret;
}
ret = gpiod_direction_output(ptn_bridge->gpio_pd_n, 1);
if (ret) {
DRM_ERROR("cannot configure gpio_pd_n\n");
return ret;
}
ptn_bridge->gpio_rst_n = devm_gpiod_get(&client->dev, "reset");
if (IS_ERR(ptn_bridge->gpio_rst_n)) {
ret = PTR_ERR(ptn_bridge->gpio_rst_n);
DRM_ERROR("cannot get gpio_rst_n %d\n", ret);
return ret;
}
/*
* Request the reset pin low to avoid the bridge being
* initialized prematurely
*/
ret = gpiod_direction_output(ptn_bridge->gpio_rst_n, 0);
if (ret) {
DRM_ERROR("cannot configure gpio_rst_n\n");
return ret;
}
ret = of_property_read_u32(dev->of_node, "edid-emulation",
&ptn_bridge->edid_emulation);
if (ret) {
dev_err(dev, "Can't read EDID emulation value\n");
return ret;
}
ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.of_node = dev->of_node;
ret = drm_bridge_add(&ptn_bridge->bridge);
if (ret) {
DRM_ERROR("Failed to add bridge\n");
return ret;
}
i2c_set_clientdata(client, ptn_bridge);
return 0;
}
static int ptn3460_remove(struct i2c_client *client)
{
struct ptn3460_bridge *ptn_bridge = i2c_get_clientdata(client);
drm_bridge_remove(&ptn_bridge->bridge);
return 0;
}
static const struct i2c_device_id ptn3460_i2c_table[] = {
{"nxp,ptn3460", 0},
{},
};
MODULE_DEVICE_TABLE(i2c, ptn3460_i2c_table);
static const struct of_device_id ptn3460_match[] = {
{ .compatible = "nxp,ptn3460" },
{},
};
MODULE_DEVICE_TABLE(of, ptn3460_match);
static struct i2c_driver ptn3460_driver = {
.id_table = ptn3460_i2c_table,
.probe = ptn3460_probe,
.remove = ptn3460_remove,
.driver = {
.name = "nxp,ptn3460",
.owner = THIS_MODULE,
.of_match_table = ptn3460_match,
},
};
module_i2c_driver(ptn3460_driver);
MODULE_AUTHOR("Sean Paul <seanpaul@chromium.org>");
MODULE_DESCRIPTION("NXP ptn3460 eDP-LVDS converter driver");
MODULE_LICENSE("GPL v2");


@ -317,17 +317,17 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
if (ret) {
kfree(gfbdev);
if (ret)
return ret;
ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
if (ret)
return ret;
}
drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(cdev->dev);
drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
return 0;
return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
}
void cirrus_fbdev_fini(struct cirrus_device *cdev)


@ -56,6 +56,11 @@ drm_atomic_state_alloc(struct drm_device *dev)
if (!state)
return NULL;
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
state->allow_modeset = true;
state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
state->crtcs = kcalloc(dev->mode_config.num_crtc,
@ -129,6 +134,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
connector->funcs->atomic_destroy_state(connector,
state->connector_states[i]);
state->connector_states[i] = NULL;
}
for (i = 0; i < config->num_crtc; i++) {
@ -139,6 +145,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
crtc->funcs->atomic_destroy_state(crtc,
state->crtc_states[i]);
state->crtc_states[i] = NULL;
}
for (i = 0; i < config->num_total_plane; i++) {
@ -149,6 +156,7 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
plane->funcs->atomic_destroy_state(plane,
state->plane_states[i]);
state->plane_states[i] = NULL;
}
}
EXPORT_SYMBOL(drm_atomic_state_clear);
@ -216,6 +224,83 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
/**
* drm_atomic_crtc_set_property - set property on CRTC
* @crtc: the drm CRTC to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling crtc->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *config = &dev->mode_config;
/* FIXME: Mode prop is missing, which also controls ->enable. */
if (property == config->prop_active)
state->active = val;
else if (crtc->funcs->atomic_set_property)
return crtc->funcs->atomic_set_property(crtc, state, property, val);
else
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(drm_atomic_crtc_set_property);
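As a rough sketch of the driver half of this contract: only driver-private properties reach the ->atomic_set_property() hook, since the core ones are consumed above. All foo_* names and the subclassed state below are hypothetical.

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 dither;				/* hypothetical driver knob */
};

static struct drm_property *foo_dither_property;	/* hypothetical */

static int foo_crtc_atomic_set_property(struct drm_crtc *crtc,
					struct drm_crtc_state *state,
					struct drm_property *property,
					uint64_t val)
{
	struct foo_crtc_state *foo_state =
		container_of(state, struct foo_crtc_state, base);

	if (property == foo_dither_property) {
		foo_state->dither = val;
		return 0;
	}

	/* anything else is unknown; the core already handled its own */
	return -EINVAL;
}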
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
int drm_atomic_crtc_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
if (crtc->funcs->atomic_get_property)
return crtc->funcs->atomic_get_property(crtc, state, property, val);
return -EINVAL;
}
/**
* drm_atomic_crtc_check - check crtc state
* @crtc: crtc to check
* @state: crtc state to check
*
* Provides core sanity checks for crtc state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
/* NOTE: we explicitly don't enforce constraints such as primary
* layer covering entire screen, since that is something we want
* to allow (on hw that supports it). For hw that does not, it
* should be checked in driver's crtc->atomic_check() vfunc.
*
* TODO: Add generic modeset state checks once we support those.
*/
if (state->active && !state->enable) {
DRM_DEBUG_KMS("[CRTC:%d] active without enabled\n",
crtc->base.id);
return -EINVAL;
}
return 0;
}
/**
* drm_atomic_get_plane_state - get plane state
* @state: global atomic state object
@ -271,6 +356,185 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_plane_state);
/**
* drm_atomic_plane_set_property - set property on plane
* @plane: the drm plane to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling plane->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_plane_set_property(struct drm_plane *plane,
struct drm_plane_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_unreference(fb);
} else if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
} else if (property == config->prop_crtc_y) {
state->crtc_y = U642I64(val);
} else if (property == config->prop_crtc_w) {
state->crtc_w = val;
} else if (property == config->prop_crtc_h) {
state->crtc_h = val;
} else if (property == config->prop_src_x) {
state->src_x = val;
} else if (property == config->prop_src_y) {
state->src_y = val;
} else if (property == config->prop_src_w) {
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
} else if (property == config->rotation_property) {
state->rotation = val;
} else if (plane->funcs->atomic_set_property) {
return plane->funcs->atomic_set_property(plane, state,
property, val);
} else {
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_atomic_plane_set_property);
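The rotation branch above only fires if the driver created and attached the shared rotation property. A minimal setup sketch, assuming the drm_mode_create_rotation_property() helper of this era and an arbitrarily chosen rotation mask:

static int foo_plane_expose_rotation(struct drm_device *dev,
				     struct drm_plane *plane)
{
	struct drm_mode_config *config = &dev->mode_config;

	if (!config->rotation_property)
		config->rotation_property =
			drm_mode_create_rotation_property(dev,
							  BIT(DRM_ROTATE_0) |
							  BIT(DRM_ROTATE_180));
	if (!config->rotation_property)
		return -ENOMEM;

	/* initial value: unrotated */
	drm_object_attach_property(&plane->base, config->rotation_property,
				   BIT(DRM_ROTATE_0));
	return 0;
}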
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
static int
drm_atomic_plane_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_fb_id) {
*val = (state->fb) ? state->fb->base.id : 0;
} else if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->prop_crtc_x) {
*val = I642U64(state->crtc_x);
} else if (property == config->prop_crtc_y) {
*val = I642U64(state->crtc_y);
} else if (property == config->prop_crtc_w) {
*val = state->crtc_w;
} else if (property == config->prop_crtc_h) {
*val = state->crtc_h;
} else if (property == config->prop_src_x) {
*val = state->src_x;
} else if (property == config->prop_src_y) {
*val = state->src_y;
} else if (property == config->prop_src_w) {
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
} else if (plane->funcs->atomic_get_property) {
return plane->funcs->atomic_get_property(plane, state, property, val);
} else {
return -EINVAL;
}
return 0;
}
/**
* drm_atomic_plane_check - check plane state
* @plane: plane to check
* @state: plane state to check
*
* Provides core sanity checks for plane state.
*
* RETURNS:
* Zero on success, error code on failure
*/
static int drm_atomic_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
unsigned int fb_width, fb_height;
unsigned int i;
/* either *both* CRTC and FB must be set, or neither */
if (WARN_ON(state->crtc && !state->fb)) {
DRM_DEBUG_KMS("CRTC set but no FB\n");
return -EINVAL;
} else if (WARN_ON(state->fb && !state->crtc)) {
DRM_DEBUG_KMS("FB set but no CRTC\n");
return -EINVAL;
}
/* if disabled, we don't care about the rest of the state: */
if (!state->crtc)
return 0;
/* Check whether this plane is usable on this CRTC */
if (!(plane->possible_crtcs & drm_crtc_mask(state->crtc))) {
DRM_DEBUG_KMS("Invalid crtc for plane\n");
return -EINVAL;
}
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
if (state->fb->pixel_format == plane->format_types[i])
break;
if (i == plane->format_count) {
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(state->fb->pixel_format));
return -EINVAL;
}
/* Give drivers some help against integer overflows */
if (state->crtc_w > INT_MAX ||
state->crtc_x > INT_MAX - (int32_t) state->crtc_w ||
state->crtc_h > INT_MAX ||
state->crtc_y > INT_MAX - (int32_t) state->crtc_h) {
DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
state->crtc_w, state->crtc_h,
state->crtc_x, state->crtc_y);
return -ERANGE;
}
fb_width = state->fb->width << 16;
fb_height = state->fb->height << 16;
/* Make sure source coordinates are inside the fb. */
if (state->src_w > fb_width ||
state->src_x > fb_width - state->src_w ||
state->src_h > fb_height ||
state->src_y > fb_height - state->src_h) {
DRM_DEBUG_KMS("Invalid source coordinates "
"%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
state->src_w >> 16, ((state->src_w & 0xffff) * 15625) >> 10,
state->src_h >> 16, ((state->src_h & 0xffff) * 15625) >> 10,
state->src_x >> 16, ((state->src_x & 0xffff) * 15625) >> 10,
state->src_y >> 16, ((state->src_y & 0xffff) * 15625) >> 10);
return -ENOSPC;
}
return 0;
}
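The source rectangle is in 16.16 fixed point, which is what the `((... & 0xffff) * 15625) >> 10` in the debug message above decodes: 15625/1024 equals 10^6/2^16, so the low 16 bits print as a six-digit decimal fraction. A small sketch of the encoding; the helper name is made up:

#include <stdint.h>

/* Pack pixels plus a fraction given in millionths into 16.16 fixed point. */
static inline uint32_t to_fixed_16_16(uint32_t pixels, uint32_t millionths)
{
	return (pixels << 16) |
	       (uint32_t)((uint64_t)millionths * 65536 / 1000000);
}

/* Example: 100.5 pixels -> 0x00648000; the debug code above prints the
 * 0x8000 fraction as (0x8000 * 15625) >> 10 == 500000. */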
/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
@ -342,10 +606,114 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
}
EXPORT_SYMBOL(drm_atomic_get_connector_state);
/**
* drm_atomic_connector_set_property - set property on connector.
* @connector: the drm connector to set a property on
* @state: the state object to update with the new property value
* @property: the property to set
* @val: the new property value
*
* Use this instead of calling connector->atomic_set_property directly.
* This function handles generic/core properties and calls out to
* driver's ->atomic_set_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*
* RETURNS:
* Zero on success, error code on failure
*/
int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
struct drm_crtc *crtc = drm_crtc_find(dev, val);
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
* is done in legacy setprop path for us. Disallow (for
* now?) atomic writes to DPMS property:
*/
return -EINVAL;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
} else {
return -EINVAL;
}
}
EXPORT_SYMBOL(drm_atomic_connector_set_property);
/*
* This function handles generic/core properties and calls out to
* driver's ->atomic_get_property() for driver properties. To ensure
* consistent behavior you must call this function rather than the
* driver hook directly.
*/
static int
drm_atomic_connector_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
*val = (state->crtc) ? state->crtc->base.id : 0;
} else if (property == config->dpms_property) {
*val = connector->dpms;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
} else {
return -EINVAL;
}
return 0;
}
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val)
{
struct drm_device *dev = property->dev;
int ret;
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
ret = drm_atomic_connector_get_property(connector,
connector->state, property, val);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
ret = drm_atomic_crtc_get_property(crtc,
crtc->state, property, val);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
WARN_ON(!drm_modeset_is_locked(&plane->mutex));
ret = drm_atomic_plane_get_property(plane,
plane->state, property, val);
break;
}
default:
ret = -EINVAL;
break;
}
return ret;
}
/**
* drm_atomic_set_crtc_for_plane - set crtc for plane
* @state: the incoming atomic state
* @plane: the plane whose incoming state to update
* @plane_state: the plane whose incoming state to update
* @crtc: crtc to use for the plane
*
* Changing the assigned crtc for a plane requires us to grab the lock and state
@ -358,16 +726,12 @@ EXPORT_SYMBOL(drm_atomic_get_connector_state);
* sequence must be restarted. All other errors are fatal.
*/
int
drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
struct drm_plane *plane, struct drm_crtc *crtc)
drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
struct drm_crtc *crtc)
{
struct drm_plane_state *plane_state =
drm_atomic_get_plane_state(state, plane);
struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
if (WARN_ON(IS_ERR(plane_state)))
return PTR_ERR(plane_state);
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
@ -583,14 +947,63 @@ EXPORT_SYMBOL(drm_atomic_legacy_backoff);
*/
int drm_atomic_check_only(struct drm_atomic_state *state)
{
struct drm_mode_config *config = &state->dev->mode_config;
struct drm_device *dev = state->dev;
struct drm_mode_config *config = &dev->mode_config;
int nplanes = config->num_total_plane;
int ncrtcs = config->num_crtc;
int i, ret = 0;
DRM_DEBUG_KMS("checking %p\n", state);
for (i = 0; i < nplanes; i++) {
struct drm_plane *plane = state->planes[i];
if (!plane)
continue;
ret = drm_atomic_plane_check(plane, state->plane_states[i]);
if (ret) {
DRM_DEBUG_KMS("[PLANE:%d] atomic core check failed\n",
plane->base.id);
return ret;
}
}
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc)
continue;
ret = drm_atomic_crtc_check(crtc, state->crtc_states[i]);
if (ret) {
DRM_DEBUG_KMS("[CRTC:%d] atomic core check failed\n",
crtc->base.id);
return ret;
}
}
if (config->funcs->atomic_check)
return config->funcs->atomic_check(state->dev, state);
else
return 0;
ret = config->funcs->atomic_check(state->dev, state);
if (!state->allow_modeset) {
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc *crtc = state->crtcs[i];
struct drm_crtc_state *crtc_state = state->crtc_states[i];
if (!crtc)
continue;
if (crtc_state->mode_changed ||
crtc_state->active_changed) {
DRM_DEBUG_KMS("[CRTC:%d] requires full modeset\n",
crtc->base.id);
return -EINVAL;
}
}
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@ -655,3 +1068,315 @@ int drm_atomic_async_commit(struct drm_atomic_state *state)
return config->funcs->atomic_commit(state->dev, state, true);
}
EXPORT_SYMBOL(drm_atomic_async_commit);
/*
* The big monster ioctl
*/
static struct drm_pending_vblank_event *create_vblank_event(
struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
{
struct drm_pending_vblank_event *e = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
file_priv->event_space -= sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
e = kzalloc(sizeof *e, GFP_KERNEL);
if (e == NULL) {
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
goto out;
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.user_data = user_data;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
out:
return e;
}
static void destroy_vblank_event(struct drm_device *dev,
struct drm_file *file_priv, struct drm_pending_vblank_event *e)
{
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
file_priv->event_space += sizeof e->event;
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(e);
}
static int atomic_set_prop(struct drm_atomic_state *state,
struct drm_mode_object *obj, struct drm_property *prop,
uint64_t prop_value)
{
struct drm_mode_object *ref;
int ret;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR: {
struct drm_connector *connector = obj_to_connector(obj);
struct drm_connector_state *connector_state;
connector_state = drm_atomic_get_connector_state(state, connector);
if (IS_ERR(connector_state)) {
ret = PTR_ERR(connector_state);
break;
}
ret = drm_atomic_connector_set_property(connector,
connector_state, prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
struct drm_crtc *crtc = obj_to_crtc(obj);
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
break;
}
ret = drm_atomic_crtc_set_property(crtc,
crtc_state, prop, prop_value);
break;
}
case DRM_MODE_OBJECT_PLANE: {
struct drm_plane *plane = obj_to_plane(obj);
struct drm_plane_state *plane_state;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state)) {
ret = PTR_ERR(plane_state);
break;
}
ret = drm_atomic_plane_set_property(plane,
plane_state, prop, prop_value);
break;
}
default:
ret = -EINVAL;
break;
}
drm_property_change_valid_put(prop, ref);
return ret;
}
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_atomic *arg = data;
uint32_t __user *objs_ptr = (uint32_t __user *)(unsigned long)(arg->objs_ptr);
uint32_t __user *count_props_ptr = (uint32_t __user *)(unsigned long)(arg->count_props_ptr);
uint32_t __user *props_ptr = (uint32_t __user *)(unsigned long)(arg->props_ptr);
uint64_t __user *prop_values_ptr = (uint64_t __user *)(unsigned long)(arg->prop_values_ptr);
unsigned int copied_objs, copied_props;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
struct drm_plane *plane;
unsigned plane_mask = 0;
int ret = 0;
unsigned int i, j;
/* disallow for drivers not supporting atomic: */
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EINVAL;
/* disallow for userspace that has not enabled atomic cap (even
* though this may be a bit overkill, since legacy userspace
* wouldn't know how to call this ioctl)
*/
if (!file_priv->atomic)
return -EINVAL;
if (arg->flags & ~DRM_MODE_ATOMIC_FLAGS)
return -EINVAL;
if (arg->reserved)
return -EINVAL;
if ((arg->flags & DRM_MODE_PAGE_FLIP_ASYNC) &&
!dev->mode_config.async_page_flip)
return -EINVAL;
/* can't test and expect an event at the same time. */
if ((arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) &&
(arg->flags & DRM_MODE_PAGE_FLIP_EVENT))
return -EINVAL;
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(dev);
if (!state)
return -ENOMEM;
state->acquire_ctx = &ctx;
state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
retry:
copied_objs = 0;
copied_props = 0;
for (i = 0; i < arg->count_objs; i++) {
uint32_t obj_id, count_props;
struct drm_mode_object *obj;
if (get_user(obj_id, objs_ptr + copied_objs)) {
ret = -EFAULT;
goto fail;
}
obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_ANY);
if (!obj || !obj->properties) {
ret = -ENOENT;
goto fail;
}
if (obj->type == DRM_MODE_OBJECT_PLANE) {
plane = obj_to_plane(obj);
plane_mask |= (1 << drm_plane_index(plane));
plane->old_fb = plane->fb;
}
if (get_user(count_props, count_props_ptr + copied_objs)) {
ret = -EFAULT;
goto fail;
}
copied_objs++;
for (j = 0; j < count_props; j++) {
uint32_t prop_id;
uint64_t prop_value;
struct drm_property *prop;
if (get_user(prop_id, props_ptr + copied_props)) {
ret = -EFAULT;
goto fail;
}
prop = drm_property_find(dev, prop_id);
if (!prop) {
ret = -ENOENT;
goto fail;
}
if (copy_from_user(&prop_value,
prop_values_ptr + copied_props,
sizeof(prop_value))) {
ret = -EFAULT;
goto fail;
}
ret = atomic_set_prop(state, obj, prop, prop_value);
if (ret)
goto fail;
copied_props++;
}
}
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
int ncrtcs = dev->mode_config.num_crtc;
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc_state *crtc_state = state->crtc_states[i];
struct drm_pending_vblank_event *e;
if (!crtc_state)
continue;
e = create_vblank_event(dev, file_priv, arg->user_data);
if (!e) {
ret = -ENOMEM;
goto fail;
}
crtc_state->event = e;
}
}
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
ret = drm_atomic_check_only(state);
/* _check_only() does not free state, unlike _commit() */
drm_atomic_state_free(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
ret = drm_atomic_async_commit(state);
} else {
ret = drm_atomic_commit(state);
}
/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
* locks (ie. while it is still safe to deref plane->state). We
* need to do this here because the driver entry points cannot
* distinguish between legacy and atomic ioctls.
*/
drm_for_each_plane_mask(plane, dev, plane_mask) {
if (ret == 0) {
struct drm_framebuffer *new_fb = plane->state->fb;
if (new_fb)
drm_framebuffer_reference(new_fb);
plane->fb = new_fb;
plane->crtc = plane->state->crtc;
} else {
plane->old_fb = NULL;
}
if (plane->old_fb) {
drm_framebuffer_unreference(plane->old_fb);
plane->old_fb = NULL;
}
}
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
fail:
if (ret == -EDEADLK)
goto backoff;
if (arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
int ncrtcs = dev->mode_config.num_crtc;
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc_state *crtc_state = state->crtc_states[i];
if (!crtc_state)
continue;
destroy_vblank_event(dev, file_priv, crtc_state->event);
crtc_state->event = NULL;
}
}
drm_atomic_state_free(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
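For reference, libdrm (around 2.4.62, so slightly after this commit) grew wrappers for this ioctl; a minimal test-only commit from userspace might look like the sketch below. In real code the object and property IDs would come from drmModeObjectGetProperties().

#include <errno.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static int test_only_commit(int fd, uint32_t plane_id,
			    uint32_t fb_prop_id, uint32_t fb_id)
{
	drmModeAtomicReq *req;
	int ret;

	/* matches the file_priv->atomic gate in the ioctl above */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		return -EINVAL;

	req = drmModeAtomicAlloc();
	if (!req)
		return -ENOMEM;

	if (drmModeAtomicAddProperty(req, plane_id, fb_prop_id, fb_id) < 0) {
		drmModeAtomicFree(req);
		return -EINVAL;
	}

	/* TEST_ONLY exercises drm_atomic_check_only() and commits nothing */
	ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
	drmModeAtomicFree(req);
	return ret;
}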


@ -297,13 +297,22 @@ mode_fixup(struct drm_atomic_state *state)
}
}
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
encoder->base.id, encoder->name);
return -EINVAL;
if (funcs->atomic_check) {
ret = funcs->atomic_check(encoder, crtc_state,
conn_state);
if (ret) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] check failed\n",
encoder->base.id, encoder->name);
return ret;
}
} else {
ret = funcs->mode_fixup(encoder, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
encoder->base.id, encoder->name);
return -EINVAL;
}
}
}
@ -330,7 +339,35 @@ mode_fixup(struct drm_atomic_state *state)
return 0;
}
static int
static bool
needs_modeset(struct drm_crtc_state *state)
{
return state->mode_changed || state->active_changed;
}
/**
* drm_atomic_helper_check_modeset - validate state object for modeset changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* This does all the crtc and connector related computations for an atomic
* update. It computes and updates crtc_state->mode_changed, adds any additional
* connectors needed for full modesets and calls down into ->mode_fixup
* functions of the driver backend.
*
* IMPORTANT:
*
* Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* again after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
* the adjusted dotclock for fifo space allocation and watermark computation.
*
* RETURNS
* Zero for success or -errno
*/
int
drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state)
{
@ -382,12 +419,27 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
crtc = state->crtcs[i];
crtc_state = state->crtc_states[i];
if (!crtc || !crtc_state->mode_changed)
if (!crtc)
continue;
DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
/*
* We must set ->active_changed after walking connectors; otherwise an
* update that only changes ->active would result in a full modeset,
* because update_connector_routing() forces one.
*/
if (crtc->state->active != crtc_state->active) {
DRM_DEBUG_KMS("[CRTC:%d] active changed\n",
crtc->base.id);
crtc_state->active_changed = true;
}
if (!needs_modeset(crtc_state))
continue;
DRM_DEBUG_KMS("[CRTC:%d] needs all connectors, enable: %c, active: %c\n",
crtc->base.id,
crtc_state->enable ? 'y' : 'n');
crtc_state->enable ? 'y' : 'n',
crtc_state->active ? 'y' : 'n');
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret != 0)
@ -406,23 +458,23 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return mode_fixup(state);
}
EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
/**
* drm_atomic_helper_check - validate state object
* drm_atomic_helper_check_planes - validate state object for plane changes
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* Only crtcs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their ->atomic_check()
* callback.
* This does all the plane update related checks by calling into the
* ->atomic_check hooks provided by the driver.
*
* RETURNS
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state)
int
drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
@ -445,7 +497,7 @@ int drm_atomic_helper_check(struct drm_device *dev,
ret = funcs->atomic_check(plane, plane_state);
if (ret) {
DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
DRM_DEBUG_KMS("[PLANE:%d] atomic driver check failed\n",
plane->base.id);
return ret;
}
@ -465,16 +517,49 @@ int drm_atomic_helper_check(struct drm_device *dev,
ret = funcs->atomic_check(crtc, state->crtc_states[i]);
if (ret) {
DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
DRM_DEBUG_KMS("[CRTC:%d] atomic driver check failed\n",
crtc->base.id);
return ret;
}
}
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check_planes);
/**
* drm_atomic_helper_check - validate state object
* @dev: DRM device
* @state: the driver state object
*
* Check the state object to see if the requested state is physically possible.
* Only crtcs and planes have check callbacks, so for any additional (global)
* checking that a driver needs it can simply wrap that around this function.
* Drivers without such needs can directly use this as their ->atomic_check()
* callback.
*
* This just wraps the two parts of the state checking for planes and modeset
* state in the default order: First it calls drm_atomic_helper_check_modeset()
* and then drm_atomic_helper_check_planes(). The assumption is that the
* ->atomic_check functions depend upon an updated adjusted_mode.clock to
* e.g. properly compute watermarks.
*
* RETURNS
* Zero for success or -errno
*/
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
int ret;
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
ret = drm_atomic_helper_check_planes(dev, state);
if (ret)
return ret;
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_check);
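To make the re-check rule from the comment block concrete: a hypothetical driver ->atomic_check that discovers during plane checking that a full modeset is needed would flag it and validate the modeset state once more. foo_needs_full_modeset() is made up.

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int i, ret;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		struct drm_crtc_state *crtc_state = state->crtc_states[i];

		if (!crtc_state || !foo_needs_full_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;
	}

	/* calling this a second time is explicitly permitted, see above */
	return drm_atomic_helper_check_modeset(dev, state);
}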
@ -490,6 +575,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_connector *connector;
struct drm_encoder_helper_funcs *funcs;
struct drm_encoder *encoder;
struct drm_crtc_state *old_crtc_state;
old_conn_state = old_state->connector_states[i];
connector = old_state->connectors[i];
@ -499,6 +585,11 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (!old_conn_state || !old_conn_state->crtc)
continue;
old_crtc_state = old_state->crtc_states[drm_crtc_index(old_conn_state->crtc)];
if (!old_crtc_state->active)
continue;
encoder = old_conn_state->best_encoder;
/* We shouldn't get this far if we didn't previously have
@ -509,6 +600,9 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = encoder->helper_private;
DRM_DEBUG_KMS("disabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call disable hooks twice.
@ -517,7 +611,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
encoder->bridge->funcs->disable(encoder->bridge);
/* Right function depends upon target state. */
if (connector->state->crtc)
if (connector->state->crtc && funcs->prepare)
funcs->prepare(encoder);
else if (funcs->disable)
funcs->disable(encoder);
@ -531,17 +625,26 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
for (i = 0; i < ncrtcs; i++) {
struct drm_crtc_helper_funcs *funcs;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
crtc = old_state->crtcs[i];
old_crtc_state = old_state->crtc_states[i];
/* Shut down everything that needs a full modeset. */
if (!crtc || !crtc->state->mode_changed)
if (!crtc || !needs_modeset(crtc->state))
continue;
if (!old_crtc_state->active)
continue;
funcs = crtc->helper_private;
DRM_DEBUG_KMS("disabling [CRTC:%d]\n",
crtc->base.id);
/* Right function depends upon target state. */
if (crtc->state->enable)
if (crtc->state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->disable)
funcs->disable(crtc);
@ -620,8 +723,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
funcs = crtc->helper_private;
if (crtc->state->enable)
if (crtc->state->enable) {
DRM_DEBUG_KMS("modeset on [CRTC:%d]\n",
crtc->base.id);
funcs->mode_set_nofb(crtc);
}
}
for (i = 0; i < old_state->num_connector; i++) {
@ -642,6 +749,12 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
mode = &new_crtc_state->mode;
adjusted_mode = &new_crtc_state->adjusted_mode;
if (!new_crtc_state->mode_changed)
continue;
DRM_DEBUG_KMS("modeset on [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call mode_set hooks twice.
@ -694,13 +807,23 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
crtc = old_state->crtcs[i];
/* Need to filter out CRTCs where only planes change. */
if (!crtc || !crtc->state->mode_changed)
if (!crtc || !needs_modeset(crtc->state))
continue;
if (!crtc->state->active)
continue;
funcs = crtc->helper_private;
if (crtc->state->enable)
funcs->commit(crtc);
if (crtc->state->enable) {
DRM_DEBUG_KMS("enabling [CRTC:%d]\n",
crtc->base.id);
if (funcs->enable)
funcs->enable(crtc);
else
funcs->commit(crtc);
}
}
for (i = 0; i < old_state->num_connector; i++) {
@ -713,9 +836,15 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
if (!connector || !connector->state->best_encoder)
continue;
if (!connector->state->crtc->state->active)
continue;
encoder = connector->state->best_encoder;
funcs = encoder->helper_private;
DRM_DEBUG_KMS("enabling [ENCODER:%d:%s]\n",
encoder->base.id, encoder->name);
/*
* Each encoder has at most one connector (since we always steal
* it away), so we won't call enable hooks twice.
@ -723,7 +852,10 @@ void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
if (encoder->bridge)
encoder->bridge->funcs->pre_enable(encoder->bridge);
funcs->commit(encoder);
if (funcs->enable)
funcs->enable(encoder);
else
funcs->commit(encoder);
if (encoder->bridge)
encoder->bridge->funcs->enable(encoder->bridge);
@ -813,6 +945,11 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
if (!crtc->state->enable)
continue;
/* Legacy cursor ioctls are completely unsynced, and userspace
* relies on that (by doing tons of cursor updates). */
if (old_state->legacy_cursor_update)
continue;
if (!framebuffer_changed(dev, old_state, crtc))
continue;
@ -1053,12 +1190,19 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs = plane->helper_private;
if (!funcs || !funcs->atomic_update)
if (!funcs)
continue;
old_plane_state = old_state->plane_states[i];
funcs->atomic_update(plane, old_plane_state);
/*
* Special-case disabling the plane if drivers support it.
*/
if (drm_atomic_plane_disabling(plane, old_plane_state) &&
funcs->atomic_disable)
funcs->atomic_disable(plane, old_plane_state);
else
funcs->atomic_update(plane, old_plane_state);
}
for (i = 0; i < ncrtcs; i++) {
@ -1222,7 +1366,7 @@ retry:
goto fail;
}
ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@ -1239,6 +1383,9 @@ retry:
if (ret != 0)
goto fail;
if (plane == crtc->cursor)
state->legacy_cursor_update = true;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
@ -1301,7 +1448,7 @@ retry:
goto fail;
}
ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, NULL);
@ -1314,6 +1461,9 @@ retry:
plane_state->src_h = 0;
plane_state->src_w = 0;
if (plane == plane->crtc->cursor)
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
@ -1463,8 +1613,9 @@ retry:
WARN_ON(set->num_connectors);
crtc_state->enable = false;
crtc_state->active = false;
ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
ret = drm_atomic_set_crtc_for_plane(primary_state, NULL);
if (ret != 0)
goto fail;
@ -1477,9 +1628,10 @@ retry:
WARN_ON(!set->num_connectors);
crtc_state->enable = true;
crtc_state->active = true;
drm_mode_copy(&crtc_state->mode, set->mode);
ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
ret = drm_atomic_set_crtc_for_plane(primary_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(primary_state, set->fb);
@ -1558,8 +1710,8 @@ retry:
goto fail;
}
ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
property, val);
ret = drm_atomic_crtc_set_property(crtc, crtc_state,
property, val);
if (ret)
goto fail;
@ -1617,8 +1769,8 @@ retry:
goto fail;
}
ret = plane->funcs->atomic_set_property(plane, plane_state,
property, val);
ret = drm_atomic_plane_set_property(plane, plane_state,
property, val);
if (ret)
goto fail;
@ -1676,8 +1828,8 @@ retry:
goto fail;
}
ret = connector->funcs->atomic_set_property(connector, connector_state,
property, val);
ret = drm_atomic_connector_set_property(connector, connector_state,
property, val);
if (ret)
goto fail;
@ -1751,7 +1903,7 @@ retry:
goto fail;
}
ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
if (ret != 0)
goto fail;
drm_atomic_set_fb_for_plane(plane_state, fb);
@ -1788,6 +1940,83 @@ backoff:
}
EXPORT_SYMBOL(drm_atomic_helper_page_flip);
/**
* drm_atomic_helper_connector_dpms() - connector dpms helper implementation
* @connector: affected connector
* @mode: DPMS mode
*
* This is the main helper function provided by the atomic helper framework for
* implementing the legacy DPMS connector interface. It computes the new desired
* ->active state for the corresponding CRTC (if the connector is enabled) and
* updates it.
*/
void drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode)
{
struct drm_mode_config *config = &connector->dev->mode_config;
struct drm_atomic_state *state;
struct drm_crtc_state *crtc_state;
struct drm_crtc *crtc;
struct drm_connector *tmp_connector;
int ret;
bool active = false;
if (mode != DRM_MODE_DPMS_ON)
mode = DRM_MODE_DPMS_OFF;
connector->dpms = mode;
crtc = connector->state->crtc;
if (!crtc)
return;
/* FIXME: ->dpms has no return value so can't forward the -ENOMEM. */
state = drm_atomic_state_alloc(connector->dev);
if (!state)
return;
state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
retry:
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state)) {
ret = PTR_ERR(crtc_state);
goto fail;
}
WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
list_for_each_entry(tmp_connector, &config->connector_list, head) {
if (tmp_connector->state->crtc != crtc)
continue;
if (tmp_connector->dpms == DRM_MODE_DPMS_ON) {
active = true;
break;
}
}
crtc_state->active = active;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful async commit. */
return;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
WARN(1, "Driver bug: Changing ->active failed with ret=%i\n", ret);
return;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_dpms);
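Wiring the helper in is a single table entry; everything below except the hypothetical detect callback is a stock helper of this era:

static const struct drm_connector_funcs foo_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = foo_connector_detect,		/* hypothetical */
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};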
/**
* DOC: atomic state reset and initialization
*
@ -1814,6 +2043,9 @@ void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
{
kfree(crtc->state);
crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
if (crtc->state)
crtc->state->crtc = crtc;
}
EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
@ -1836,6 +2068,7 @@ drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
if (state) {
state->mode_changed = false;
state->active_changed = false;
state->planes_changed = false;
state->event = NULL;
}
@ -1873,6 +2106,9 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
if (plane->state)
plane->state->plane = plane;
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
@ -1930,6 +2166,9 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
{
kfree(connector->state);
connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
if (connector->state)
connector->state->connector = connector;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
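Drivers that subclass the state structs need to preserve the same back-pointer in their own reset hooks; a minimal sketch with a made-up plane state subclass:

struct foo_plane_state {
	struct drm_plane_state base;
	u32 zpos;				/* hypothetical driver field */
};

static void foo_plane_reset(struct drm_plane *plane)
{
	struct foo_plane_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	kfree(plane->state);
	plane->state = state ? &state->base : NULL;
	if (plane->state)
		plane->state->plane = plane;	/* same back-pointer as above */
}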


@ -0,0 +1,91 @@
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sub license,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <linux/err.h>
#include <linux/module.h>
#include <drm/drm_crtc.h>
#include "drm/drmP.h"
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
int drm_bridge_add(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
return 0;
}
EXPORT_SYMBOL(drm_bridge_add);
void drm_bridge_remove(struct drm_bridge *bridge)
{
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
}
EXPORT_SYMBOL(drm_bridge_remove);
int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge)
{
if (!dev || !bridge)
return -EINVAL;
if (bridge->dev)
return -EBUSY;
bridge->dev = dev;
if (bridge->funcs->attach)
return bridge->funcs->attach(bridge);
return 0;
}
EXPORT_SYMBOL(drm_bridge_attach);
#ifdef CONFIG_OF
struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
struct drm_bridge *bridge;
mutex_lock(&bridge_lock);
list_for_each_entry(bridge, &bridge_list, list) {
if (bridge->of_node == np) {
mutex_unlock(&bridge_lock);
return bridge;
}
}
mutex_unlock(&bridge_lock);
return NULL;
}
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
MODULE_AUTHOR("Ajay Kumar <ajaykumar.rs@samsung.com>");
MODULE_DESCRIPTION("DRM bridge infrastructure");
MODULE_LICENSE("GPL and additional rights");


@ -32,6 +32,7 @@
#include <drm/drmP.h>
#if defined(CONFIG_X86)
#include <asm/smp.h>
/*
* clflushopt is an unordered instruction which needs fencing with mfence or
@ -64,12 +65,6 @@ static void drm_cache_flush_clflush(struct page *pages[],
drm_clflush_page(*pages++);
mb();
}
static void
drm_clflush_ipi_handler(void *null)
{
wbinvd();
}
#endif
void
@ -82,7 +77,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
return;
}
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#elif defined(__powerpc__)
@ -121,7 +116,7 @@ drm_clflush_sg(struct sg_table *st)
return;
}
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");
@ -144,7 +139,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
return;
}
if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0)
if (wbinvd_on_all_cpus())
printk(KERN_ERR "Timed out waiting for cache flush.\n");
#else
printk(KERN_ERR "Architecture has no drm_cache.c support\n");

(File diff suppressed because it is too large.)


@ -946,6 +946,7 @@ int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mod
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
return -ENOMEM;
crtc_state->crtc = crtc;
crtc_state->enable = true;
crtc_state->planes_changed = true;
@ -1005,6 +1006,7 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);


@ -36,3 +36,9 @@ int drm_mode_object_get(struct drm_device *dev,
void drm_mode_object_put(struct drm_device *dev,
struct drm_mode_object *object);
/* drm_atomic.c */
int drm_atomic_get_property(struct drm_mode_object *obj,
struct drm_property *property, uint64_t *val);
int drm_mode_atomic_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);


@ -353,6 +353,37 @@ int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link)
}
EXPORT_SYMBOL(drm_dp_link_power_up);
/**
* drm_dp_link_power_down() - power down a DisplayPort link
* @aux: DisplayPort AUX channel
* @link: pointer to a structure containing the link configuration
*
* Returns 0 on success or a negative error code on failure.
*/
int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link)
{
u8 value;
int err;
/* DP_SET_POWER register is only available on DPCD v1.1 and later */
if (link->revision < 0x11)
return 0;
err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
if (err < 0)
return err;
value &= ~DP_SET_POWER_MASK;
value |= DP_SET_POWER_D3;
err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
if (err < 0)
return err;
return 0;
}
EXPORT_SYMBOL(drm_dp_link_power_down);
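A sketch of the intended pairing across suspend/resume, assuming the link was characterized earlier with drm_dp_link_probe() and a made-up foo_dp device struct holding the AUX channel and link:

struct foo_dp {
	struct drm_dp_aux aux;
	struct drm_dp_link link;
};

static int foo_dp_suspend(struct foo_dp *dp)
{
	return drm_dp_link_power_down(&dp->aux, &dp->link);
}

static int foo_dp_resume(struct foo_dp *dp)
{
	int ret;

	ret = drm_dp_link_power_up(&dp->aux, &dp->link);
	if (ret < 0)
		return ret;

	return drm_dp_link_configure(&dp->aux, &dp->link);
}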
/**
* drm_dp_link_configure() - configure a DisplayPort link
* @aux: DisplayPort AUX channel


@ -40,15 +40,19 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
bool drm_atomic = 0;
MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
MODULE_PARM_DESC(atomic, "Enable experimental atomic KMS API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
module_param_named_unsafe(atomic, drm_atomic, bool, 0600);
static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

View File

@ -1722,7 +1722,7 @@ out:
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;

View File

@ -478,64 +478,59 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
static bool
drm_dequeue_event(struct drm_file *file_priv,
size_t total, size_t max, struct drm_pending_event **out)
{
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e;
unsigned long flags;
bool ret = false;
spin_lock_irqsave(&dev->event_lock, flags);
*out = NULL;
if (list_empty(&file_priv->event_list))
goto out;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + total > max)
goto out;
file_priv->event_space += e->event->length;
list_del(&e->link);
*out = e;
ret = true;
out:
spin_unlock_irqrestore(&dev->event_lock, flags);
return ret;
}
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset)
{
struct drm_file *file_priv = filp->private_data;
struct drm_pending_event *e;
size_t total;
ssize_t ret;
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret = 0;
if ((filp->f_flags & O_NONBLOCK) == 0) {
ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list));
if (ret < 0)
return ret;
}
if (!access_ok(VERIFY_WRITE, buffer, count))
return -EFAULT;
total = 0;
while (drm_dequeue_event(file_priv, total, count, &e)) {
if (copy_to_user(buffer + total,
e->event, e->event->length)) {
total = -EFAULT;
spin_lock_irq(&dev->event_lock);
for (;;) {
if (list_empty(&file_priv->event_list)) {
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
spin_unlock_irq(&dev->event_lock);
ret = wait_event_interruptible(file_priv->event_wait,
!list_empty(&file_priv->event_list));
spin_lock_irq(&dev->event_lock);
if (ret < 0)
break;
ret = 0;
} else {
struct drm_pending_event *e;
e = list_first_entry(&file_priv->event_list,
struct drm_pending_event, link);
if (e->event->length + ret > count)
break;
if (__copy_to_user_inatomic(buffer + ret,
e->event, e->event->length)) {
if (ret == 0)
ret = -EFAULT;
break;
}
file_priv->event_space += e->event->length;
ret += e->event->length;
list_del(&e->link);
e->destroy(e);
break;
}
total += e->event->length;
e->destroy(e);
}
spin_unlock_irq(&dev->event_lock);
return total ?: -EAGAIN;
return ret;
}
EXPORT_SYMBOL(drm_read);
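The rewritten drm_read() keeps the userspace contract: read() returns only whole events, stops early (returning what was already copied) if the next event would not fit, and reports -EAGAIN only when nothing was copied on a non-blocking fd. A minimal userspace sketch of consuming vblank events under that contract, assuming an already-open DRM fd with a vblank event queued:

#include <drm/drm.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read one buffer's worth of DRM events and print any vblank events. */
static void sketch_drain_events(int drm_fd)
{
	char buf[1024];
	ssize_t len = read(drm_fd, buf, sizeof(buf)); /* blocks unless O_NONBLOCK */
	ssize_t off = 0;

	while (len > 0 && off + (ssize_t)sizeof(struct drm_event) <= len) {
		struct drm_event ev;

		memcpy(&ev, buf + off, sizeof(ev));
		if (ev.length < sizeof(ev))
			break; /* malformed; should not happen */
		if (ev.type == DRM_EVENT_VBLANK &&
		    ev.length >= sizeof(struct drm_event_vblank)) {
			struct drm_event_vblank vbl;

			memcpy(&vbl, buf + off, sizeof(vbl));
			printf("vblank seq %u at %u.%06u\n",
			       vbl.sequence, vbl.tv_sec, vbl.tv_usec);
		}
		off += ev.length; /* events are self-sized */
	}
}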

View File

@ -152,30 +152,6 @@ int drm_bufs_info(struct seq_file *m, void *data)
return 0;
}
/**
* Called when "/proc/dri/.../vblank" is read.
*/
int drm_vblank_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
int crtc;
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
crtc, atomic_read(&dev->vblank[crtc].refcount));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
crtc, dev->vblank[crtc].last_wait);
seq_printf(m, "CRTC %d in modeset: %d\n",
crtc, dev->vblank[crtc].inmodeset);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Called when "/proc/dri/.../clients" is read.
*

View File

@ -55,7 +55,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
int drm_name_info(struct seq_file *m, void *data);
int drm_vm_info(struct seq_file *m, void *data);
int drm_bufs_info(struct seq_file *m, void *data);
int drm_vblank_info(struct seq_file *m, void *data);
int drm_clients_info(struct seq_file *m, void* data);
int drm_gem_name_info(struct seq_file *m, void *data);

View File

@ -32,6 +32,7 @@
#include <drm/drm_core.h>
#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"
#include <linux/pci.h>
#include <linux/export.h>
@ -345,6 +346,17 @@ drm_setclientcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
return -EINVAL;
file_priv->universal_planes = req->value;
break;
case DRM_CLIENT_CAP_ATOMIC:
/* for now, hide behind experimental drm.atomic moduleparam */
if (!drm_atomic)
return -EINVAL;
if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
return -EINVAL;
if (req->value > 1)
return -EINVAL;
file_priv->atomic = req->value;
file_priv->universal_planes = req->value;
break;
default:
return -EINVAL;
}
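From userspace the new cap is opt-in per file descriptor. A minimal libdrm sketch, assuming the kernel was booted with drm.atomic=1, the driver advertises DRIVER_ATOMIC, and libdrm is new enough to define DRM_CLIENT_CAP_ATOMIC:

#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return 1;
	/* Per the code above, this also implicitly enables universal planes. */
	if (drmSetClientCap(fd, DRM_CLIENT_CAP_ATOMIC, 1))
		fprintf(stderr, "atomic API not available on this kernel/driver\n");
	drmClose(fd);
	return 0;
}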
@ -620,6 +632,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATOMIC, drm_mode_atomic_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )

View File

@ -185,8 +185,15 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
return;
}
dev->driver->disable_vblank(dev, crtc);
vblank->enabled = false;
/*
* Only disable vblank interrupts if they're enabled. This avoids
* calling the ->disable_vblank() operation in atomic context with the
* hardware potentially runtime suspended.
*/
if (vblank->enabled) {
dev->driver->disable_vblank(dev, crtc);
vblank->enabled = false;
}
/* No further vblank irqs will be processed after
* this point. Get current hardware vblank count and
@ -778,7 +785,7 @@ static struct timeval get_drm_timestamp(void)
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
* vblank interval
* vblank interval
* @dev: DRM device
* @crtc: which CRTC's vblank timestamp to retrieve
* @tvblank: Pointer to target struct timeval which should receive the timestamp
@ -933,6 +940,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
{
struct timeval now;
unsigned int seq;
if (crtc >= 0) {
seq = drm_vblank_count_and_time(dev, crtc, &now);
} else {
@ -1422,7 +1430,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
unsigned int seq;
int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL) {
ret = -ENOMEM;
goto err_put;
@ -1431,7 +1439,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
e->pipe = pipe;
e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof e->event;
e->event.base.length = sizeof(e->event);
e->event.user_data = vblwait->request.signal;
e->base.event = &e->event.base;
e->base.file_priv = file_priv;
@ -1451,12 +1459,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
goto err_unlock;
}
if (file_priv->event_space < sizeof e->event) {
if (file_priv->event_space < sizeof(e->event)) {
ret = -EBUSY;
goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
file_priv->event_space -= sizeof(e->event);
seq = drm_vblank_count_and_time(dev, pipe, &now);
if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&

View File

@ -323,8 +323,6 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
const struct mipi_dsi_msg *msg)
{
const u8 *tx = msg->tx_buf;
if (!packet || !msg)
return -EINVAL;
@ -353,8 +351,10 @@ int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
packet->header[2] = (msg->tx_len >> 8) & 0xff;
packet->payload_length = msg->tx_len;
packet->payload = tx;
packet->payload = msg->tx_buf;
} else {
const u8 *tx = msg->tx_buf;
packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
}
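A minimal sketch of the calling convention this fixes. Note that for long packets only the pointer is copied into packet->payload, so the payload buffer must stay valid for the life of the packet; the payload bytes below are illustrative:

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int sketch_build_dcs_write(struct mipi_dsi_packet *packet)
{
	static const u8 payload[] = { 0x2a, 0x00, 0x00, 0x01, 0xdf };
	struct mipi_dsi_msg msg = {
		.channel = 0,
		.type = MIPI_DSI_DCS_LONG_WRITE,
		.tx_buf = payload,
		.tx_len = sizeof(payload),
	};

	/* Fills packet->header and points packet->payload at msg.tx_buf. */
	return mipi_dsi_create_packet(packet, &msg);
}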

View File

@ -615,6 +615,46 @@ void drm_display_mode_from_videomode(const struct videomode *vm,
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
/**
* drm_display_mode_to_videomode - fill in @vm using @dmode
* @dmode: drm_display_mode structure to use as source
* @vm: videomode structure to use as destination
*
* Fills out @vm using the display mode specified in @dmode.
*/
void drm_display_mode_to_videomode(const struct drm_display_mode *dmode,
struct videomode *vm)
{
vm->hactive = dmode->hdisplay;
vm->hfront_porch = dmode->hsync_start - dmode->hdisplay;
vm->hsync_len = dmode->hsync_end - dmode->hsync_start;
vm->hback_porch = dmode->htotal - dmode->hsync_end;
vm->vactive = dmode->vdisplay;
vm->vfront_porch = dmode->vsync_start - dmode->vdisplay;
vm->vsync_len = dmode->vsync_end - dmode->vsync_start;
vm->vback_porch = dmode->vtotal - dmode->vsync_end;
vm->pixelclock = dmode->clock * 1000;
vm->flags = 0;
if (dmode->flags & DRM_MODE_FLAG_PHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NHSYNC)
vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_PVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
else if (dmode->flags & DRM_MODE_FLAG_NVSYNC)
vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
if (dmode->flags & DRM_MODE_FLAG_INTERLACE)
vm->flags |= DISPLAY_FLAGS_INTERLACED;
if (dmode->flags & DRM_MODE_FLAG_DBLSCAN)
vm->flags |= DISPLAY_FLAGS_DOUBLESCAN;
if (dmode->flags & DRM_MODE_FLAG_DBLCLK)
vm->flags |= DISPLAY_FLAGS_DOUBLECLK;
}
EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
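As a worked example (not from the patch), for the standard CEA 1920x1080@60 timing with hdisplay=1920, hsync_start=2008, hsync_end=2052, htotal=2200 and clock=148500, the conversion yields:

/*
 *   hfront_porch = 2008 - 1920 = 88
 *   hsync_len    = 2052 - 2008 = 44
 *   hback_porch  = 2200 - 2052 = 148
 *   pixelclock   = 148500 * 1000 = 148500000 Hz
 */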
#ifdef CONFIG_OF
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
@ -739,6 +779,8 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
* - The CRTC_STEREO_DOUBLE flag can be used to compute the timings for
* buffers containing two eyes (only adjust the timings when needed, eg. for
* "frame packing" or "side by side full").
* - The CRTC_NO_DBLSCAN and CRTC_NO_VSCAN flags request that adjustment *not*
* be performed for doublescan and vscan > 1 modes respectively.
*/
void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
{
@ -765,18 +807,22 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags)
}
}
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
if (!(adjust_flags & CRTC_NO_DBLSCAN)) {
if (p->flags & DRM_MODE_FLAG_DBLSCAN) {
p->crtc_vdisplay *= 2;
p->crtc_vsync_start *= 2;
p->crtc_vsync_end *= 2;
p->crtc_vtotal *= 2;
}
}
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
if (!(adjust_flags & CRTC_NO_VSCAN)) {
if (p->vscan > 1) {
p->crtc_vdisplay *= p->vscan;
p->crtc_vsync_start *= p->vscan;
p->crtc_vsync_end *= p->vscan;
p->crtc_vtotal *= p->vscan;
}
}
if (adjust_flags & CRTC_STEREO_DOUBLE) {
@ -905,10 +951,41 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
}
EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
/**
* drm_mode_validate_basic - make sure the mode is somewhat sane
* @mode: mode to check
*
* Check that the mode timings are at least somewhat reasonable.
* Any hardware specific limits are left up for each driver to check.
*
* Returns:
* The mode status
*/
enum drm_mode_status
drm_mode_validate_basic(const struct drm_display_mode *mode)
{
if (mode->clock == 0)
return MODE_CLOCK_LOW;
if (mode->hdisplay == 0 ||
mode->hsync_start < mode->hdisplay ||
mode->hsync_end < mode->hsync_start ||
mode->htotal < mode->hsync_end)
return MODE_H_ILLEGAL;
if (mode->vdisplay == 0 ||
mode->vsync_start < mode->vdisplay ||
mode->vsync_end < mode->vsync_start ||
mode->vtotal < mode->vsync_end)
return MODE_V_ILLEGAL;
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_basic);
/**
* drm_mode_validate_size - make sure modes adhere to size constraints
* @dev: DRM device
* @mode_list: list of modes to check
* @mode: mode to check
* @maxX: maximum width
* @maxY: maximum height
*
@ -916,23 +993,80 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
* limitations of the DRM device/connector. If a mode is too big its status
* member is updated with the appropriate validation failure code. The list
* itself is not changed.
*
* Returns:
* The mode status
*/
void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY)
enum drm_mode_status
drm_mode_validate_size(const struct drm_display_mode *mode,
int maxX, int maxY)
{
struct drm_display_mode *mode;
if (maxX > 0 && mode->hdisplay > maxX)
return MODE_VIRTUAL_X;
list_for_each_entry(mode, mode_list, head) {
if (maxX > 0 && mode->hdisplay > maxX)
mode->status = MODE_VIRTUAL_X;
if (maxY > 0 && mode->vdisplay > maxY)
return MODE_VIRTUAL_Y;
if (maxY > 0 && mode->vdisplay > maxY)
mode->status = MODE_VIRTUAL_Y;
}
return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_size);
#define MODE_STATUS(status) [MODE_ ## status + 3] = #status
static const char * const drm_mode_status_names[] = {
MODE_STATUS(OK),
MODE_STATUS(HSYNC),
MODE_STATUS(VSYNC),
MODE_STATUS(H_ILLEGAL),
MODE_STATUS(V_ILLEGAL),
MODE_STATUS(BAD_WIDTH),
MODE_STATUS(NOMODE),
MODE_STATUS(NO_INTERLACE),
MODE_STATUS(NO_DBLESCAN),
MODE_STATUS(NO_VSCAN),
MODE_STATUS(MEM),
MODE_STATUS(VIRTUAL_X),
MODE_STATUS(VIRTUAL_Y),
MODE_STATUS(MEM_VIRT),
MODE_STATUS(NOCLOCK),
MODE_STATUS(CLOCK_HIGH),
MODE_STATUS(CLOCK_LOW),
MODE_STATUS(CLOCK_RANGE),
MODE_STATUS(BAD_HVALUE),
MODE_STATUS(BAD_VVALUE),
MODE_STATUS(BAD_VSCAN),
MODE_STATUS(HSYNC_NARROW),
MODE_STATUS(HSYNC_WIDE),
MODE_STATUS(HBLANK_NARROW),
MODE_STATUS(HBLANK_WIDE),
MODE_STATUS(VSYNC_NARROW),
MODE_STATUS(VSYNC_WIDE),
MODE_STATUS(VBLANK_NARROW),
MODE_STATUS(VBLANK_WIDE),
MODE_STATUS(PANEL),
MODE_STATUS(INTERLACE_WIDTH),
MODE_STATUS(ONE_WIDTH),
MODE_STATUS(ONE_HEIGHT),
MODE_STATUS(ONE_SIZE),
MODE_STATUS(NO_REDUCED),
MODE_STATUS(NO_STEREO),
MODE_STATUS(UNVERIFIED),
MODE_STATUS(BAD),
MODE_STATUS(ERROR),
};
#undef MODE_STATUS
static const char *drm_get_mode_status_name(enum drm_mode_status status)
{
int index = status + 3;
if (WARN_ON(index < 0 || index >= ARRAY_SIZE(drm_mode_status_names)))
return "";
return drm_mode_status_names[index];
}
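For reference, the "+ 3" bias works because the three negative statuses in enum drm_mode_status (assumed here to be MODE_UNVERIFIED = -3, MODE_BAD = -2, MODE_ERROR = -1) map onto array slots 0..2, with MODE_OK (0) onwards starting at slot 3:

/*
 *   MODE_UNVERIFIED (-3) -> drm_mode_status_names[0] = "UNVERIFIED"
 *   MODE_ERROR      (-1) -> drm_mode_status_names[2] = "ERROR"
 *   MODE_OK          (0) -> drm_mode_status_names[3] = "OK"
 */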
/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
@ -954,8 +1088,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
list_del(&mode->head);
if (verbose) {
drm_mode_debug_printmodeline(mode);
DRM_DEBUG_KMS("Not using %s mode %d\n",
mode->name, mode->status);
DRM_DEBUG_KMS("Not using %s mode: %s\n",
mode->name,
drm_get_mode_status_name(mode->status));
}
drm_mode_destroy(dev, mode);
}

View File

@ -142,6 +142,17 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
{
int hscale, vscale;
if (!fb) {
*visible = false;
return 0;
}
/* crtc should only be NULL when disabling (i.e., !fb) */
if (WARN_ON(!crtc)) {
*visible = false;
return 0;
}
if (!crtc->enabled && !can_update_disabled) {
DRM_DEBUG_KMS("Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
@ -155,11 +166,6 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
return -ERANGE;
}
if (!fb) {
*visible = false;
return 0;
}
*visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
if (!*visible)
/*
@ -429,7 +435,8 @@ int drm_plane_helper_commit(struct drm_plane *plane,
goto out;
}
if (plane_funcs->prepare_fb && plane_state->fb) {
if (plane_funcs->prepare_fb && plane_state->fb &&
plane_state->fb != old_fb) {
ret = plane_funcs->prepare_fb(plane, plane_state->fb);
if (ret)
goto out;
@ -443,13 +450,28 @@ int drm_plane_helper_commit(struct drm_plane *plane,
crtc_funcs[i]->atomic_begin(crtc[i]);
}
plane_funcs->atomic_update(plane, plane_state);
/*
* Drivers may optionally implement the ->atomic_disable callback, so
* special-case that here.
*/
if (drm_atomic_plane_disabling(plane, plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, plane_state);
else
plane_funcs->atomic_update(plane, plane_state);
for (i = 0; i < 2; i++) {
if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
crtc_funcs[i]->atomic_flush(crtc[i]);
}
/*
* If we only moved the plane and didn't change fb's, there's no need to
* wait for vblank.
*/
if (plane->state->fb == old_fb)
goto out;
for (i = 0; i < 2; i++) {
if (!crtc[i])
continue;
@ -478,7 +500,7 @@ out:
}
/**
* drm_plane_helper_update() - Helper for primary plane update
* drm_plane_helper_update() - Transitional helper for plane update
* @plane: plane object to update
* @crtc: owning CRTC of owning plane
* @fb: framebuffer to flip onto plane
@ -517,6 +539,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
plane_state->crtc = crtc;
drm_atomic_set_fb_for_plane(plane_state, fb);
@ -534,7 +557,7 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_plane_helper_update);
/**
* drm_plane_helper_disable() - Helper for primary plane disable
* drm_plane_helper_disable() - Transitional helper for plane disable
* @plane: plane to disable
*
* Provides a default plane disable handler using the atomic plane update
@ -563,6 +586,7 @@ int drm_plane_helper_disable(struct drm_plane *plane)
plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
if (!plane_state)
return -ENOMEM;
plane_state->plane = plane;
plane_state->crtc = NULL;
drm_atomic_set_fb_for_plane(plane_state, NULL);
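These transitional helpers are meant to plug straight into a driver's drm_plane_funcs while it converts to atomic. A minimal sketch of that wiring, assuming the standard helper and cleanup functions:

#include <drm/drm_crtc.h>
#include <drm/drm_plane_helper.h>

static const struct drm_plane_funcs sketch_plane_funcs = {
	.update_plane	= drm_plane_helper_update,
	.disable_plane	= drm_plane_helper_disable,
	.destroy	= drm_plane_cleanup,
};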

View File

@ -58,28 +58,23 @@
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
static enum drm_mode_status
drm_mode_validate_flag(const struct drm_display_mode *mode,
int flags)
{
struct drm_display_mode *mode;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
return MODE_NO_INTERLACE;
if (flags == (DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_INTERLACE |
DRM_MODE_FLAG_3D_MASK))
return;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
return MODE_NO_DBLESCAN;
list_for_each_entry(mode, &connector->modes, head) {
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
!(flags & DRM_MODE_FLAG_INTERLACE))
mode->status = MODE_NO_INTERLACE;
if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) &&
!(flags & DRM_MODE_FLAG_DBLSCAN))
mode->status = MODE_NO_DBLESCAN;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
mode->status = MODE_NO_STEREO;
}
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) &&
!(flags & DRM_MODE_FLAG_3D_MASK))
return MODE_NO_STEREO;
return;
return MODE_OK;
}
static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
@ -108,6 +103,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
int count = 0;
int mode_flags = 0;
bool verbose_prune = true;
enum drm_connector_status old_status;
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
@ -126,7 +122,33 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
old_status = connector->status;
connector->status = connector->funcs->detect(connector, true);
/*
* Normally either the driver's hpd code or the poll loop should
* pick up any changes and fire the hotplug event. But if
* userspace sneaks in a probe, we might miss a change. Hence
* check here, and if anything changed start the hotplug code.
*/
if (old_status != connector->status) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
connector->name,
old_status, connector->status);
/*
* The hotplug event code might call into the fb
* helpers, and so expects that we do not hold any
* locks. Fire up the poll work instead; it will
* disable itself again.
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
schedule_delayed_work(&dev->mode_config.output_poll_work,
0);
}
}
/* Re-enable polling in case the global poll config changed. */
@ -164,18 +186,22 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
drm_mode_connector_list_update(connector, merge_type_bits);
if (maxX && maxY)
drm_mode_validate_size(dev, &connector->modes, maxX, maxY);
if (connector->interlace_allowed)
mode_flags |= DRM_MODE_FLAG_INTERLACE;
if (connector->doublescan_allowed)
mode_flags |= DRM_MODE_FLAG_DBLSCAN;
if (connector->stereo_allowed)
mode_flags |= DRM_MODE_FLAG_3D_MASK;
drm_mode_validate_flag(connector, mode_flags);
list_for_each_entry(mode, &connector->modes, head) {
mode->status = drm_mode_validate_basic(mode);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_flag(mode, mode_flags);
if (mode->status == MODE_OK && connector_funcs->mode_valid)
mode->status = connector_funcs->mode_valid(connector,
mode);
@ -275,10 +301,14 @@ static void output_poll_execute(struct work_struct *work)
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status;
bool repoll = false, changed = false;
bool repoll = false, changed;
/* Pick up any changes detected by the probe functions. */
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
if (!drm_kms_helper_poll)
return;
goto out;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@ -305,6 +335,24 @@ static void output_poll_execute(struct work_struct *work)
if (old_status != connector->status) {
const char *old, *new;
/*
* The poll work sets force=false when calling detect so
* that drivers can avoid disruptive tests (e.g. when
* load-detect cycles could cause flickering on other,
* running displays). This bears the risk that we
* flip-flop between an unknown status here in the poll work and
* the real state when userspace forces a full detect
* call after receiving a hotplug event due to this
* change.
*
* Hence clamp an unknown detect status to the old
* value.
*/
if (connector->status == connector_status_unknown) {
connector->status = old_status;
continue;
}
old = drm_get_connector_status_name(old_status);
new = drm_get_connector_status_name(connector->status);
@ -320,6 +368,7 @@ static void output_poll_execute(struct work_struct *work)
mutex_unlock(&dev->mode_config.mutex);
out:
if (changed)
drm_kms_helper_hotplug_event(dev);

View File

@ -339,19 +339,51 @@ static ssize_t select_subconnector_show(struct device *device,
drm_get_dvi_i_select_name((int)subconnector));
}
static struct device_attribute connector_attrs[] = {
__ATTR_RO(status),
__ATTR_RO(enabled),
__ATTR_RO(dpms),
__ATTR_RO(modes),
static DEVICE_ATTR_RO(status);
static DEVICE_ATTR_RO(enabled);
static DEVICE_ATTR_RO(dpms);
static DEVICE_ATTR_RO(modes);
static struct attribute *connector_dev_attrs[] = {
&dev_attr_status.attr,
&dev_attr_enabled.attr,
&dev_attr_dpms.attr,
&dev_attr_modes.attr,
NULL
};
/* These attributes are for both DVI-I connectors and all types of tv-out. */
static struct device_attribute connector_attrs_opt1[] = {
__ATTR_RO(subconnector),
__ATTR_RO(select_subconnector),
static DEVICE_ATTR_RO(subconnector);
static DEVICE_ATTR_RO(select_subconnector);
static struct attribute *connector_opt_dev_attrs[] = {
&dev_attr_subconnector.attr,
&dev_attr_select_subconnector.attr,
NULL
};
static umode_t connector_opt_dev_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = kobj_to_dev(kobj);
struct drm_connector *connector = to_drm_connector(dev);
/*
* In the long run it may be a good idea to make one set of
* optionals per connector type.
*/
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
return attr->mode;
}
return 0;
}
static struct bin_attribute edid_attr = {
.attr.name = "edid",
.attr.mode = 0444,
@ -359,6 +391,27 @@ static struct bin_attribute edid_attr = {
.read = edid_show,
};
static struct bin_attribute *connector_bin_attrs[] = {
&edid_attr,
NULL
};
static const struct attribute_group connector_dev_group = {
.attrs = connector_dev_attrs,
.bin_attrs = connector_bin_attrs,
};
static const struct attribute_group connector_opt_dev_group = {
.attrs = connector_opt_dev_attrs,
.is_visible = connector_opt_dev_is_visible,
};
static const struct attribute_group *connector_dev_groups[] = {
&connector_dev_group,
&connector_opt_dev_group,
NULL
};
/**
* drm_sysfs_connector_add - add a connector to sysfs
* @connector: connector to add
@ -371,73 +424,27 @@ static struct bin_attribute edid_attr = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
int attr_cnt = 0;
int opt_cnt = 0;
int i;
int ret;
if (connector->kdev)
return 0;
connector->kdev = device_create(drm_class, dev->primary->kdev,
0, connector, "card%d-%s",
dev->primary->index, connector->name);
connector->kdev =
device_create_with_groups(drm_class, dev->primary->kdev, 0,
connector, connector_dev_groups,
"card%d-%s", dev->primary->index,
connector->name);
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
if (IS_ERR(connector->kdev)) {
DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
ret = PTR_ERR(connector->kdev);
goto out;
return PTR_ERR(connector->kdev);
}
/* Standard attributes */
for (attr_cnt = 0; attr_cnt < ARRAY_SIZE(connector_attrs); attr_cnt++) {
ret = device_create_file(connector->kdev, &connector_attrs[attr_cnt]);
if (ret)
goto err_out_files;
}
/* Optional attributes */
/*
* In the long run it may be a good idea to make one set of
* optionals per connector type.
*/
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Component:
case DRM_MODE_CONNECTOR_TV:
for (opt_cnt = 0; opt_cnt < ARRAY_SIZE(connector_attrs_opt1); opt_cnt++) {
ret = device_create_file(connector->kdev, &connector_attrs_opt1[opt_cnt]);
if (ret)
goto err_out_files;
}
break;
default:
break;
}
ret = sysfs_create_bin_file(&connector->kdev->kobj, &edid_attr);
if (ret)
goto err_out_files;
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(dev);
return 0;
err_out_files:
for (i = 0; i < opt_cnt; i++)
device_remove_file(connector->kdev, &connector_attrs_opt1[i]);
for (i = 0; i < attr_cnt; i++)
device_remove_file(connector->kdev, &connector_attrs[i]);
device_unregister(connector->kdev);
out:
return ret;
}
/**
@ -455,16 +462,11 @@ out:
*/
void drm_sysfs_connector_remove(struct drm_connector *connector)
{
int i;
if (!connector->kdev)
return;
DRM_DEBUG("removing \"%s\" from sysfs\n",
connector->name);
for (i = 0; i < ARRAY_SIZE(connector_attrs); i++)
device_remove_file(connector->kdev, &connector_attrs[i]);
sysfs_remove_bin_file(&connector->kdev->kobj, &edid_attr);
device_unregister(connector->kdev);
connector->kdev = NULL;
}

View File

@ -6,23 +6,15 @@ config DRM_EXYNOS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
config DRM_EXYNOS_IOMMU
bool "EXYNOS DRM IOMMU Support"
bool
depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
help
Choose this option if you want to use IOMMU feature for DRM.
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
help
Choose this option if you want to use DMABUF feature for DRM.
default y
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
@ -32,9 +24,16 @@ config DRM_EXYNOS_FIMD
help
Choose this option if you want to use Exynos FIMD for DRM.
config DRM_EXYNOS7_DECON
bool "Exynos DRM DECON"
depends on DRM_EXYNOS
select FB_MODE_HELPERS
help
Choose this option if you want to use Exynos DECON for DRM.
config DRM_EXYNOS_DPI
bool "EXYNOS DRM parallel output support"
depends on DRM_EXYNOS_FIMD
depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
select DRM_PANEL
default n
help
@ -42,7 +41,7 @@ config DRM_EXYNOS_DPI
config DRM_EXYNOS_DSI
bool "EXYNOS DRM MIPI-DSI driver support"
depends on DRM_EXYNOS_FIMD
depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON)
select DRM_MIPI_DSI
select DRM_PANEL
default n
@ -51,7 +50,7 @@ config DRM_EXYNOS_DSI
config DRM_EXYNOS_DP
bool "EXYNOS DRM DP driver support"
depends on DRM_EXYNOS_FIMD && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS)
default DRM_EXYNOS
select DRM_PANEL
help

View File

@ -6,11 +6,11 @@ ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
exynos_drm_plane.o exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o

View File

@ -0,0 +1,990 @@
/* drivers/gpu/drm/exynos/exynos7_drm_decon.c
*
* Copyright (C) 2014 Samsung Electronics Co.Ltd
* Authors:
* Akshu Agarwal <akshua@gmail.com>
* Ajay Kumar <ajaykumar.rs@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/exynos7_decon.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_iommu.h"
/*
* DECON stands for Display and Enhancement controller.
*/
#define DECON_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
struct decon_win_data {
unsigned int ovl_x;
unsigned int ovl_y;
unsigned int offset_x;
unsigned int offset_y;
unsigned int ovl_width;
unsigned int ovl_height;
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
unsigned int pixel_format;
dma_addr_t dma_addr;
bool enabled;
bool resume;
};
struct decon_context {
struct device *dev;
struct drm_device *drm_dev;
struct exynos_drm_crtc *crtc;
struct clk *pclk;
struct clk *aclk;
struct clk *eclk;
struct clk *vclk;
void __iomem *regs;
struct decon_win_data win_data[WINDOWS_NR];
unsigned int default_win;
unsigned long irq_flags;
bool i80_if;
bool suspended;
int pipe;
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
struct exynos_drm_panel_info panel;
struct exynos_drm_display *display;
};
static const struct of_device_id decon_driver_dt_match[] = {
{.compatible = "samsung,exynos7-decon"},
{},
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);
static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
if (ctx->suspended)
return;
atomic_set(&ctx->wait_vsync_event, 1);
/*
* Wait for DECON to signal the VSYNC interrupt, or return after
* a 50ms timeout (i.e. a 20 Hz refresh rate).
*/
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
DRM_DEBUG_KMS("vblank wait timed out.\n");
}
static void decon_clear_channel(struct decon_context *ctx)
{
int win, ch_enabled = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
if (val & WINCONx_ENWIN) {
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
ch_enabled = 1;
}
}
/* Wait for vsync, as disable channel takes effect at next vsync */
if (ch_enabled) {
unsigned int state = ctx->suspended;
ctx->suspended = 0;
decon_wait_for_vblank(ctx->crtc);
ctx->suspended = state;
}
}
static int decon_ctx_initialize(struct decon_context *ctx,
struct drm_device *drm_dev)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
ctx->drm_dev = drm_dev;
ctx->pipe = priv->pipe++;
/* attach this sub driver to iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev)) {
int ret;
/*
* If any channel is already active, the IOMMU will fault
* once it is enabled, so disable any channels that are on.
*/
decon_clear_channel(ctx);
ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
if (ret) {
DRM_ERROR("drm_iommu_attach failed.\n");
return ret;
}
}
return 0;
}
static void decon_ctx_remove(struct decon_context *ctx)
{
/* detach this sub driver from iommu mapping if supported. */
if (is_drm_iommu_supported(ctx->drm_dev))
drm_iommu_detach_device(ctx->drm_dev, ctx->dev);
}
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk);
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
if (adjusted_mode->vrefresh == 0)
adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
return true;
}
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.mode;
u32 val, clkdiv;
if (ctx->suspended)
return;
/* nothing to do if we haven't set the mode yet */
if (mode->htotal == 0 || mode->vtotal == 0)
return;
if (!ctx->i80_if) {
int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;
/* setup vertical timing values. */
vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;
val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1);
writel(val, ctx->regs + VIDTCON0);
val = VIDTCON1_VSPW(vsync_len - 1);
writel(val, ctx->regs + VIDTCON1);
/* setup horizontal timing values. */
hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;
/* program the horizontal timing registers. */
val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1);
writel(val, ctx->regs + VIDTCON2);
val = VIDTCON3_HSPW(hsync_len - 1);
writel(val, ctx->regs + VIDTCON3);
}
/* setup horizontal and vertical display size. */
val = VIDTCON4_LINEVAL(mode->vdisplay - 1) |
VIDTCON4_HOZVAL(mode->hdisplay - 1);
writel(val, ctx->regs + VIDTCON4);
writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD);
/*
* Register fields with the '_F' suffix are updated at vsync
* (i.e. at DMA start).
*/
val = VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(val, ctx->regs + VIDCON0);
clkdiv = decon_calc_clkdiv(ctx, mode);
if (clkdiv > 1) {
val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1);
writel(val, ctx->regs + VCLKCON1);
writel(val, ctx->regs + VCLKCON2);
}
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
}
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return -EPERM;
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val |= VIDINTCON0_INT_ENABLE;
if (!ctx->i80_if) {
val |= VIDINTCON0_INT_FRAME;
val &= ~VIDINTCON0_FRAMESEL0_MASK;
val |= VIDINTCON0_FRAMESEL0_VSYNC;
}
writel(val, ctx->regs + VIDINTCON0);
}
return 0;
}
static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
u32 val;
if (ctx->suspended)
return;
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
val &= ~VIDINTCON0_INT_ENABLE;
if (!ctx->i80_if)
val &= ~VIDINTCON0_INT_FRAME;
writel(val, ctx->regs + VIDINTCON0);
}
}
static void decon_win_mode_set(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane)
{
struct decon_context *ctx = crtc->ctx;
struct decon_win_data *win_data;
int win, padding;
if (!plane) {
DRM_ERROR("plane is NULL\n");
return;
}
win = plane->zpos;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win >= WINDOWS_NR)
return;
win_data = &ctx->win_data[win];
padding = (plane->pitch / (plane->bpp >> 3)) - plane->fb_width;
win_data->offset_x = plane->fb_x;
win_data->offset_y = plane->fb_y;
win_data->fb_width = plane->fb_width + padding;
win_data->fb_height = plane->fb_height;
win_data->ovl_x = plane->crtc_x;
win_data->ovl_y = plane->crtc_y;
win_data->ovl_width = plane->crtc_width;
win_data->ovl_height = plane->crtc_height;
win_data->dma_addr = plane->dma_addr[0];
win_data->bpp = plane->bpp;
win_data->pixel_format = plane->pixel_format;
DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
win_data->offset_x, win_data->offset_y);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
plane->fb_width, plane->crtc_width);
}
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win)
{
struct decon_win_data *win_data = &ctx->win_data[win];
unsigned long val;
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_BPPMODE_MASK;
switch (win_data->pixel_format) {
case DRM_FORMAT_RGB565:
val |= WINCONx_BPPMODE_16BPP_565;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XRGB8888:
val |= WINCONx_BPPMODE_24BPP_xRGB;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_XBGR8888:
val |= WINCONx_BPPMODE_24BPP_xBGR;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGBX8888:
val |= WINCONx_BPPMODE_24BPP_RGBx;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRX8888:
val |= WINCONx_BPPMODE_24BPP_BGRx;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ARGB8888:
val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_ABGR8888:
val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_RGBA8888:
val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
case DRM_FORMAT_BGRA8888:
val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
WINCONx_ALPHA_SEL;
val |= WINCONx_BURSTLEN_16WORD;
break;
default:
DRM_DEBUG_KMS("invalid pixel size so using unpacked 24bpp.\n");
val |= WINCONx_BPPMODE_24BPP_xRGB;
val |= WINCONx_BURSTLEN_16WORD;
break;
}
DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
/*
* On Exynos, a 16-word DMA burst causes permanent tearing for very
* small buffers, e.g. a cursor buffer. Switching the burst mode
* based on plane size is not recommended either: plane size varies
* a lot towards the edge of the screen, and rapid movement makes
* the DMA unstable, resulting in IOMMU faults and tearing.
*/
if (win_data->fb_width < MIN_FB_WIDTH_FOR_16WORD_BURST) {
val &= ~WINCONx_BURSTLEN_MASK;
val |= WINCONx_BURSTLEN_8WORD;
}
writel(val, ctx->regs + WINCON(win));
}
static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
{
unsigned int keycon0 = 0, keycon1 = 0;
keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
keycon1 = WxKEYCON1_COLVAL(0xffffffff);
writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
/**
* decon_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: DECON context to operate on
* @win: window to protect registers for
* @protect: true to protect (disable updates)
*/
static void decon_shadow_protect_win(struct decon_context *ctx,
int win, bool protect)
{
u32 bits, val;
bits = SHADOWCON_WINx_PROTECT(win);
val = readl(ctx->regs + SHADOWCON);
if (protect)
val |= bits;
else
val &= ~bits;
writel(val, ctx->regs + SHADOWCON);
}
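This enables the protect/program/unprotect pattern used by decon_win_commit() below:

/*
 *   decon_shadow_protect_win(ctx, win, true);
 *   ... program VIDW_BUF_START / VIDOSD_x / WINCON for the window ...
 *   decon_shadow_protect_win(ctx, win, false);
 *
 * so that all '_F' shadow fields latch together at the next vsync.
 */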
static void decon_win_commit(struct exynos_drm_crtc *crtc, int zpos)
{
struct decon_context *ctx = crtc->ctx;
struct drm_display_mode *mode = &crtc->base.mode;
struct decon_win_data *win_data;
int win = zpos;
unsigned long val, alpha;
unsigned int last_x;
unsigned int last_y;
if (ctx->suspended)
return;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win >= WINDOWS_NR)
return;
win_data = &ctx->win_data[win];
/* If suspended, enable this on resume */
if (ctx->suspended) {
win_data->resume = true;
return;
}
/*
* The SHADOWCON/PRTCON registers control when timing updates take
* effect. For example, if DMA starts while only the width field of a
* register pair has been programmed, the DECON hardware could
* malfunction. With window protection enabled, the '_F' register
* fields are not latched at vsync; they are applied only once the
* window is unprotected again.
*/
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
/* buffer start address */
val = (unsigned long)win_data->dma_addr;
writel(val, ctx->regs + VIDW_BUF_START(win));
/* buffer size */
writel(win_data->fb_width, ctx->regs + VIDW_WHOLE_X(win));
writel(win_data->fb_height, ctx->regs + VIDW_WHOLE_Y(win));
/* offset from the start of the buffer to read */
writel(win_data->offset_x, ctx->regs + VIDW_OFFSET_X(win));
writel(win_data->offset_y, ctx->regs + VIDW_OFFSET_Y(win));
DRM_DEBUG_KMS("start addr = 0x%lx\n",
(unsigned long)win_data->dma_addr);
DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
win_data->ovl_width, win_data->ovl_height);
/*
* OSD position.
* If the window layout extends beyond the LCD layout, DECON fails,
* so clamp the position.
*/
if ((win_data->ovl_x + win_data->ovl_width) > mode->hdisplay)
win_data->ovl_x = mode->hdisplay - win_data->ovl_width;
if ((win_data->ovl_y + win_data->ovl_height) > mode->vdisplay)
win_data->ovl_y = mode->vdisplay - win_data->ovl_height;
val = VIDOSDxA_TOPLEFT_X(win_data->ovl_x) |
VIDOSDxA_TOPLEFT_Y(win_data->ovl_y);
writel(val, ctx->regs + VIDOSD_A(win));
last_x = win_data->ovl_x + win_data->ovl_width;
if (last_x)
last_x--;
last_y = win_data->ovl_y + win_data->ovl_height;
if (last_y)
last_y--;
val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y);
writel(val, ctx->regs + VIDOSD_B(win));
DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
win_data->ovl_x, win_data->ovl_y, last_x, last_y);
/* OSD alpha */
alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
VIDOSDxC_ALPHA0_G_F(0x0) |
VIDOSDxC_ALPHA0_B_F(0x0);
writel(alpha, ctx->regs + VIDOSD_C(win));
alpha = VIDOSDxD_ALPHA1_R_F(0xff) |
VIDOSDxD_ALPHA1_G_F(0xff) |
VIDOSDxD_ALPHA1_B_F(0xff);
writel(alpha, ctx->regs + VIDOSD_D(win));
decon_win_set_pixfmt(ctx, win);
/* hardware window 0 doesn't support color key. */
if (win != 0)
decon_win_set_colkey(ctx, win);
/* wincon */
val = readl(ctx->regs + WINCON(win));
val |= WINCONx_TRIPLE_BUF_MODE;
val |= WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
/* Enable DMA channel and unprotect windows */
decon_shadow_protect_win(ctx, win, false);
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
win_data->enabled = true;
}
static void decon_win_disable(struct exynos_drm_crtc *crtc, int zpos)
{
struct decon_context *ctx = crtc->ctx;
struct decon_win_data *win_data;
int win = zpos;
u32 val;
if (win == DEFAULT_ZPOS)
win = ctx->default_win;
if (win < 0 || win >= WINDOWS_NR)
return;
win_data = &ctx->win_data[win];
if (ctx->suspended) {
/* do not resume this window */
win_data->resume = false;
return;
}
/* protect windows */
decon_shadow_protect_win(ctx, win, true);
/* wincon */
val = readl(ctx->regs + WINCON(win));
val &= ~WINCONx_ENWIN;
writel(val, ctx->regs + WINCON(win));
/* unprotect windows */
decon_shadow_protect_win(ctx, win, false);
val = readl(ctx->regs + DECON_UPDATE);
val |= DECON_UPDATE_STANDALONE_F;
writel(val, ctx->regs + DECON_UPDATE);
win_data->enabled = false;
}
static void decon_window_suspend(struct decon_context *ctx)
{
struct decon_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
win_data->resume = win_data->enabled;
if (win_data->enabled)
decon_win_disable(ctx->crtc, i);
}
}
static void decon_window_resume(struct decon_context *ctx)
{
struct decon_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
win_data->enabled = win_data->resume;
win_data->resume = false;
}
}
static void decon_apply(struct decon_context *ctx)
{
struct decon_win_data *win_data;
int i;
for (i = 0; i < WINDOWS_NR; i++) {
win_data = &ctx->win_data[i];
if (win_data->enabled)
decon_win_commit(ctx->crtc, i);
else
decon_win_disable(ctx->crtc, i);
}
decon_commit(ctx->crtc);
}
static void decon_init(struct decon_context *ctx)
{
u32 val;
writel(VIDCON0_SWRESET, ctx->regs + VIDCON0);
val = VIDOUTCON0_DISP_IF_0_ON;
if (!ctx->i80_if)
val |= VIDOUTCON0_RGBIF;
writel(val, ctx->regs + VIDOUTCON0);
writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0);
if (!ctx->i80_if)
writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}
static int decon_poweron(struct decon_context *ctx)
{
int ret;
if (!ctx->suspended)
return 0;
ctx->suspended = false;
pm_runtime_get_sync(ctx->dev);
ret = clk_prepare_enable(ctx->pclk);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
goto pclk_err;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
goto aclk_err;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
goto eclk_err;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
goto vclk_err;
}
decon_init(ctx);
/* if vblank was enabled status, enable it again. */
if (test_and_clear_bit(0, &ctx->irq_flags)) {
ret = decon_enable_vblank(ctx->crtc);
if (ret) {
DRM_ERROR("Failed to re-enable vblank [%d]\n", ret);
goto err;
}
}
decon_window_resume(ctx);
decon_apply(ctx);
return 0;
err:
clk_disable_unprepare(ctx->vclk);
vclk_err:
clk_disable_unprepare(ctx->eclk);
eclk_err:
clk_disable_unprepare(ctx->aclk);
aclk_err:
clk_disable_unprepare(ctx->pclk);
pclk_err:
ctx->suspended = true;
return ret;
}
static int decon_poweroff(struct decon_context *ctx)
{
if (ctx->suspended)
return 0;
/*
* We need to make sure that all windows are disabled before we
* suspend that connector. Otherwise we might try to scan from
* a destroyed buffer later.
*/
decon_window_suspend(ctx);
clk_disable_unprepare(ctx->vclk);
clk_disable_unprepare(ctx->eclk);
clk_disable_unprepare(ctx->aclk);
clk_disable_unprepare(ctx->pclk);
pm_runtime_put_sync(ctx->dev);
ctx->suspended = true;
return 0;
}
static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
{
DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
switch (mode) {
case DRM_MODE_DPMS_ON:
decon_poweron(crtc->ctx);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
decon_poweroff(crtc->ctx);
break;
default:
DRM_DEBUG_KMS("unspecified mode %d\n", mode);
break;
}
}
static struct exynos_drm_crtc_ops decon_crtc_ops = {
.dpms = decon_dpms,
.mode_fixup = decon_mode_fixup,
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
.wait_for_vblank = decon_wait_for_vblank,
.win_mode_set = decon_win_mode_set,
.win_commit = decon_win_commit,
.win_disable = decon_win_disable,
};
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
struct decon_context *ctx = (struct decon_context *)dev_id;
u32 val, clear_bit;
val = readl(ctx->regs + VIDINTCON1);
clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
if (val & clear_bit)
writel(clear_bit, ctx->regs + VIDINTCON1);
/* check the crtc is detached already from encoder */
if (ctx->pipe < 0 || !ctx->drm_dev)
goto out;
if (!ctx->i80_if) {
drm_handle_vblank(ctx->drm_dev, ctx->pipe);
exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
/* set wait vsync event to zero and wake up queue. */
if (atomic_read(&ctx->wait_vsync_event)) {
atomic_set(&ctx->wait_vsync_event, 0);
wake_up(&ctx->wait_vsync_queue);
}
}
out:
return IRQ_HANDLED;
}
static int decon_bind(struct device *dev, struct device *master, void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
int ret;
ret = decon_ctx_initialize(ctx, drm_dev);
if (ret) {
DRM_ERROR("decon_ctx_initialize failed.\n");
return ret;
}
ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe,
EXYNOS_DISPLAY_TYPE_LCD,
&decon_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
decon_ctx_remove(ctx);
return PTR_ERR(ctx->crtc);
}
if (ctx->display)
exynos_drm_create_enc_conn(drm_dev, ctx->display);
return 0;
}
static void decon_unbind(struct device *dev, struct device *master,
void *data)
{
struct decon_context *ctx = dev_get_drvdata(dev);
decon_dpms(ctx->crtc, DRM_MODE_DPMS_OFF);
if (ctx->display)
exynos_dpi_remove(ctx->display);
decon_ctx_remove(ctx);
}
static const struct component_ops decon_component_ops = {
.bind = decon_bind,
.unbind = decon_unbind,
};
static int decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct decon_context *ctx;
struct device_node *i80_if_timings;
struct resource *res;
int ret;
if (!dev->of_node)
return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
EXYNOS_DISPLAY_TYPE_LCD);
if (ret)
return ret;
ctx->dev = dev;
ctx->suspended = true;
i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
if (i80_if_timings)
ctx->i80_if = true;
of_node_put(i80_if_timings);
ctx->regs = of_iomap(dev->of_node, 0);
if (IS_ERR(ctx->regs)) {
ret = PTR_ERR(ctx->regs);
goto err_del_component;
}
ctx->pclk = devm_clk_get(dev, "pclk_decon0");
if (IS_ERR(ctx->pclk)) {
dev_err(dev, "failed to get bus clock pclk\n");
ret = PTR_ERR(ctx->pclk);
goto err_iounmap;
}
ctx->aclk = devm_clk_get(dev, "aclk_decon0");
if (IS_ERR(ctx->aclk)) {
dev_err(dev, "failed to get bus clock aclk\n");
ret = PTR_ERR(ctx->aclk);
goto err_iounmap;
}
ctx->eclk = devm_clk_get(dev, "decon0_eclk");
if (IS_ERR(ctx->eclk)) {
dev_err(dev, "failed to get eclock\n");
ret = PTR_ERR(ctx->eclk);
goto err_iounmap;
}
ctx->vclk = devm_clk_get(dev, "decon0_vclk");
if (IS_ERR(ctx->vclk)) {
dev_err(dev, "failed to get vclock\n");
ret = PTR_ERR(ctx->vclk);
goto err_iounmap;
}
res = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
ctx->i80_if ? "lcd_sys" : "vsync");
if (!res) {
dev_err(dev, "irq request failed.\n");
ret = -ENXIO;
goto err_iounmap;
}
ret = devm_request_irq(dev, res->start, decon_irq_handler,
0, "drm_decon", ctx);
if (ret) {
dev_err(dev, "irq request failed.\n");
goto err_iounmap;
}
init_waitqueue_head(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
platform_set_drvdata(pdev, ctx);
ctx->display = exynos_dpi_probe(dev);
if (IS_ERR(ctx->display)) {
ret = PTR_ERR(ctx->display);
goto err_iounmap;
}
pm_runtime_enable(dev);
ret = component_add(dev, &decon_component_ops);
if (ret)
goto err_disable_pm_runtime;
return ret;
err_disable_pm_runtime:
pm_runtime_disable(dev);
err_iounmap:
iounmap(ctx->regs);
err_del_component:
exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
return ret;
}
static int decon_remove(struct platform_device *pdev)
{
struct decon_context *ctx = dev_get_drvdata(&pdev->dev);
pm_runtime_disable(&pdev->dev);
iounmap(ctx->regs);
component_del(&pdev->dev, &decon_component_ops);
exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
return 0;
}
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
.of_match_table = decon_driver_dt_match,
},
};

View File

@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_graph.h>
#include <linux/gpio.h>
#include <linux/component.h>
#include <linux/phy/phy.h>
@ -993,32 +994,20 @@ static struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = {
.best_encoder = exynos_dp_best_encoder,
};
static bool find_bridge(const char *compat, struct bridge_init *bridge)
{
bridge->client = NULL;
bridge->node = of_find_compatible_node(NULL, NULL, compat);
if (!bridge->node)
return false;
bridge->client = of_find_i2c_device_by_node(bridge->node);
if (!bridge->client)
return false;
return true;
}
/* attach the bridge found at probe time; returns 0 on success */
static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp,
struct drm_encoder *encoder)
{
struct bridge_init bridge;
int ret;
if (find_bridge("nxp,ptn3460", &bridge)) {
ret = ptn3460_init(dev, encoder, bridge.client, bridge.node);
if (!ret)
return 1;
encoder->bridge = dp->bridge;
dp->bridge->encoder = encoder;
ret = drm_bridge_attach(encoder->dev, dp->bridge);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
return ret;
}
return 0;
}
@ -1032,9 +1021,11 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
dp->encoder = encoder;
/* Pre-empt DP connector creation if there's a bridge */
ret = exynos_drm_attach_lcd_bridge(dp->drm_dev, encoder);
if (ret)
return 0;
if (dp->bridge) {
ret = exynos_drm_attach_lcd_bridge(dp, encoder);
if (!ret)
return 0;
}
connector->polled = DRM_CONNECTOR_POLL_HPD;
@ -1067,10 +1058,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
phy_power_off(dp->phy);
}
static void exynos_dp_poweron(struct exynos_drm_display *display)
static void exynos_dp_poweron(struct exynos_dp_device *dp)
{
struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode == DRM_MODE_DPMS_ON)
return;
@ -1085,13 +1074,11 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
enable_irq(dp->irq);
exynos_dp_commit(display);
exynos_dp_commit(&dp->display);
}
static void exynos_dp_poweroff(struct exynos_drm_display *display)
static void exynos_dp_poweroff(struct exynos_dp_device *dp)
{
struct exynos_dp_device *dp = display_to_dp(display);
if (dp->dpms_mode != DRM_MODE_DPMS_ON)
return;
@ -1119,12 +1106,12 @@ static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
exynos_dp_poweron(display);
exynos_dp_poweron(dp);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
exynos_dp_poweroff(display);
exynos_dp_poweroff(dp);
break;
default:
break;
@ -1241,7 +1228,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
}
}
if (!dp->panel) {
if (!dp->panel && !dp->bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
return ret;
@ -1325,7 +1312,7 @@ static const struct component_ops exynos_dp_ops = {
static int exynos_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *panel_node;
struct device_node *panel_node, *bridge_node, *endpoint;
struct exynos_dp_device *dp;
int ret;
@ -1351,6 +1338,18 @@ static int exynos_dp_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
if (endpoint) {
bridge_node = of_graph_get_remote_port_parent(endpoint);
if (bridge_node) {
dp->bridge = of_drm_find_bridge(bridge_node);
of_node_put(bridge_node);
if (!dp->bridge)
return -EPROBE_DEFER;
} else
return -EPROBE_DEFER;
}
ret = component_add(&pdev->dev, &exynos_dp_ops);
if (ret)
exynos_drm_component_del(&pdev->dev,

View File

@ -153,6 +153,7 @@ struct exynos_dp_device {
struct drm_connector connector;
struct drm_encoder *encoder;
struct drm_panel *panel;
struct drm_bridge *bridge;
struct clk *clock;
unsigned int irq;
void __iomem *reg_base;

View File

@ -63,11 +63,11 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
return -ENOMEM;
}
buf->kvaddr = (void __iomem *)dma_alloc_attrs(dev->dev,
buf->cookie = dma_alloc_attrs(dev->dev,
buf->size,
&buf->dma_addr, GFP_KERNEL,
&buf->dma_attrs);
if (!buf->kvaddr) {
if (!buf->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
ret = -ENOMEM;
goto err_free;
@@ -132,7 +132,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
buf->sgt = NULL;
if (!is_drm_iommu_supported(dev)) {
dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
dma_free_attrs(dev->dev, buf->size, buf->cookie,
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
drm_free_large(buf->pages);
} else
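
The kvaddr to cookie rename above reflects the dma_alloc_attrs() contract:
when DMA_ATTR_NO_KERNEL_MAPPING is set, the returned pointer is only an
opaque handle to hand back to dma_free_attrs(), not a CPU address that may
be dereferenced. A short sketch under the dma_attrs API of this kernel era
(the struct and helper names are illustrative only):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

struct sketch_buf {
        void *cookie;                   /* opaque; never dereferenced */
        dma_addr_t dma_addr;
        size_t size;
        struct dma_attrs attrs;
};

static int sketch_buf_alloc(struct device *dev, struct sketch_buf *buf)
{
        init_dma_attrs(&buf->attrs);
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs);

        buf->cookie = dma_alloc_attrs(dev, buf->size, &buf->dma_addr,
                                      GFP_KERNEL, &buf->attrs);
        if (!buf->cookie)
                return -ENOMEM;

        /* devices use buf->dma_addr; the CPU side has no mapping here */
        return 0;
}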

drivers/gpu/drm/exynos/exynos_drm_crtc.c

@@ -20,43 +20,9 @@
#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
enum exynos_crtc_mode {
CRTC_MODE_NORMAL, /* normal mode */
CRTC_MODE_BLANK, /* The private plane of crtc is blank */
};
/*
* Exynos specific crtc structure.
*
* @drm_crtc: crtc object.
* @manager: the manager associated with this crtc
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
* array when irq interrupt occurred. the reason of using this pipe is that
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
* @mode: store the crtc mode value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct exynos_drm_manager *manager;
unsigned int pipe;
unsigned int dpms;
enum exynos_crtc_mode mode;
wait_queue_head_t pending_flip_queue;
atomic_t pending_flip;
};
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
@@ -74,8 +40,8 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
drm_crtc_vblank_off(crtc);
}
if (manager->ops->dpms)
manager->ops->dpms(manager, mode);
if (exynos_crtc->ops->dpms)
exynos_crtc->ops->dpms(exynos_crtc, mode);
exynos_crtc->dpms = mode;
@@ -91,16 +57,15 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
struct exynos_drm_plane *exynos_plane = to_exynos_plane(crtc->primary);
exynos_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
exynos_plane_commit(crtc->primary);
if (exynos_crtc->ops->win_commit)
exynos_crtc->ops->win_commit(exynos_crtc, exynos_plane->zpos);
if (manager->ops->commit)
manager->ops->commit(manager);
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_ON);
if (exynos_crtc->ops->commit)
exynos_crtc->ops->commit(exynos_crtc);
}
static bool
@@ -109,10 +74,10 @@ exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
if (manager->ops->mode_fixup)
return manager->ops->mode_fixup(manager, mode, adjusted_mode);
if (exynos_crtc->ops->mode_fixup)
return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
adjusted_mode);
return true;
}
@@ -122,11 +87,10 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct exynos_drm_manager *manager = exynos_crtc->manager;
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
/*
* copy the mode data adjusted by mode_fixup() into crtc->mode
@@ -134,24 +98,25 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
*/
memcpy(&crtc->mode, adjusted_mode, sizeof(*adjusted_mode));
ret = exynos_check_plane(crtc->primary, fb);
if (ret < 0)
return ret;
crtc_w = fb->width - x;
crtc_h = fb->height - y;
exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
if (manager->ops->mode_set)
manager->ops->mode_set(manager, &crtc->mode);
return exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
return 0;
}
static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
unsigned int crtc_w;
unsigned int crtc_h;
int ret;
/* when framebuffer changing is requested, crtc's dpms should be on */
if (exynos_crtc->dpms > DRM_MODE_DPMS_ON) {
@@ -162,20 +127,8 @@ static int exynos_drm_crtc_mode_set_commit(struct drm_crtc *crtc, int x, int y,
crtc_w = fb->width - x;
crtc_h = fb->height - y;
ret = exynos_plane_mode_set(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
if (ret)
return ret;
exynos_drm_crtc_commit(crtc);
return 0;
}
static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
return exynos_drm_crtc_mode_set_commit(crtc, x, y, old_fb);
return exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, x, y, crtc_w, crtc_h);
}
static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
@@ -214,6 +167,7 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
struct drm_framebuffer *old_fb = crtc->primary->fb;
unsigned int crtc_w, crtc_h;
int ret = -EINVAL;
/* when the page flip is requested, crtc's dpms should be on */
@@ -245,8 +199,11 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
ret = exynos_drm_crtc_mode_set_commit(crtc, crtc->x, crtc->y,
NULL);
crtc_w = fb->width - crtc->x;
crtc_h = fb->height - crtc->y;
ret = exynos_update_plane(crtc->primary, crtc, fb, 0, 0,
crtc_w, crtc_h, crtc->x, crtc->y,
crtc_w, crtc_h);
if (ret) {
crtc->primary->fb = old_fb;
@@ -275,116 +232,61 @@ static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(exynos_crtc);
}
static int exynos_drm_crtc_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (property == dev_priv->crtc_mode_property) {
enum exynos_crtc_mode mode = val;
if (mode == exynos_crtc->mode)
return 0;
exynos_crtc->mode = mode;
switch (mode) {
case CRTC_MODE_NORMAL:
exynos_drm_crtc_commit(crtc);
break;
case CRTC_MODE_BLANK:
exynos_plane_dpms(crtc->primary, DRM_MODE_DPMS_OFF);
break;
default:
break;
}
return 0;
}
return -EINVAL;
}
static struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = exynos_drm_crtc_page_flip,
.destroy = exynos_drm_crtc_destroy,
.set_property = exynos_drm_crtc_set_property,
};
static const struct drm_prop_enum_list mode_names[] = {
{ CRTC_MODE_NORMAL, "normal" },
{ CRTC_MODE_BLANK, "blank" },
};
static void exynos_drm_crtc_attach_mode_property(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
struct drm_property *prop;
prop = dev_priv->crtc_mode_property;
if (!prop) {
prop = drm_property_create_enum(dev, 0, "mode", mode_names,
ARRAY_SIZE(mode_names));
if (!prop)
return;
dev_priv->crtc_mode_property = prop;
}
drm_object_attach_property(&crtc->base, prop, 0);
}
int exynos_drm_crtc_create(struct exynos_drm_manager *manager)
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
void *ctx)
{
struct exynos_drm_crtc *exynos_crtc;
struct drm_plane *plane;
struct exynos_drm_private *private = manager->drm_dev->dev_private;
struct exynos_drm_private *private = drm_dev->dev_private;
struct drm_crtc *crtc;
int ret;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
if (!exynos_crtc)
return -ENOMEM;
return ERR_PTR(-ENOMEM);
init_waitqueue_head(&exynos_crtc->pending_flip_queue);
atomic_set(&exynos_crtc->pending_flip, 0);
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
exynos_crtc->manager = manager;
exynos_crtc->pipe = manager->pipe;
plane = exynos_plane_init(manager->drm_dev, 1 << manager->pipe,
exynos_crtc->pipe = pipe;
exynos_crtc->type = type;
exynos_crtc->ops = ops;
exynos_crtc->ctx = ctx;
plane = exynos_plane_init(drm_dev, 1 << pipe,
DRM_PLANE_TYPE_PRIMARY);
if (IS_ERR(plane)) {
ret = PTR_ERR(plane);
goto err_plane;
}
manager->crtc = &exynos_crtc->drm_crtc;
crtc = &exynos_crtc->drm_crtc;
crtc = &exynos_crtc->base;
private->crtc[manager->pipe] = crtc;
private->crtc[pipe] = crtc;
ret = drm_crtc_init_with_planes(manager->drm_dev, crtc, plane, NULL,
ret = drm_crtc_init_with_planes(drm_dev, crtc, plane, NULL,
&exynos_crtc_funcs);
if (ret < 0)
goto err_crtc;
drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
exynos_drm_crtc_attach_mode_property(crtc);
return 0;
return exynos_crtc;
err_crtc:
plane->funcs->destroy(plane);
err_plane:
kfree(exynos_crtc);
return ret;
return ERR_PTR(ret);
}
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
@@ -392,13 +294,12 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return -EPERM;
if (manager->ops->enable_vblank)
manager->ops->enable_vblank(manager);
if (exynos_crtc->ops->enable_vblank)
exynos_crtc->ops->enable_vblank(exynos_crtc);
return 0;
}
@@ -408,13 +309,12 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
struct exynos_drm_private *private = dev->dev_private;
struct exynos_drm_crtc *exynos_crtc =
to_exynos_crtc(private->crtc[pipe]);
struct exynos_drm_manager *manager = exynos_crtc->manager;
if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
return;
if (manager->ops->disable_vblank)
manager->ops->disable_vblank(manager);
if (exynos_crtc->ops->disable_vblank)
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
@@ -443,42 +343,9 @@ void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
struct exynos_drm_overlay *overlay)
{
struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
if (manager->ops->win_mode_set)
manager->ops->win_mode_set(manager, overlay);
}
void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos)
{
struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
if (manager->ops->win_commit)
manager->ops->win_commit(manager, zpos);
}
void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos)
{
struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
if (manager->ops->win_enable)
manager->ops->win_enable(manager, zpos);
}
void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos)
{
struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
if (manager->ops->win_disable)
manager->ops->win_disable(manager, zpos);
}
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
{
struct exynos_drm_manager *manager;
struct exynos_drm_crtc *exynos_crtc;
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
@@ -487,15 +354,15 @@ void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb)
* for all encoders.
*/
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
manager = to_exynos_crtc(crtc)->manager;
exynos_crtc = to_exynos_crtc(crtc);
/*
* wait for vblank interrupt
* - this makes sure that overlay data are updated to
* real hardware.
*/
if (manager->ops->wait_for_vblank)
manager->ops->wait_for_vblank(manager);
if (exynos_crtc->ops->wait_for_vblank)
exynos_crtc->ops->wait_for_vblank(exynos_crtc);
}
}
@@ -508,8 +375,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
struct exynos_drm_crtc *exynos_crtc;
exynos_crtc = to_exynos_crtc(crtc);
if (exynos_crtc->manager->type == out_type)
return exynos_crtc->manager->pipe;
if (exynos_crtc->type == out_type)
return exynos_crtc->pipe;
}
return -EPERM;
@@ -517,8 +384,8 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev,
void exynos_drm_crtc_te_handler(struct drm_crtc *crtc)
{
struct exynos_drm_manager *manager = to_exynos_crtc(crtc)->manager;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
if (manager->ops->te_handler)
manager->ops->te_handler(manager);
if (exynos_crtc->ops->te_handler)
exynos_crtc->ops->te_handler(exynos_crtc);
}
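
With the manager abstraction gone, a display controller driver now fills an
exynos_drm_crtc_ops table and passes its private context straight to
exynos_drm_crtc_create(). A sketch of the intended usage (the FIMD-like
names and the context struct are hypothetical, not taken from this patch):

#include <linux/err.h>
#include <linux/io.h>

/* hypothetical driver-private state */
struct sketch_fimd_context {
        void __iomem *regs;
};

static void sketch_fimd_commit(struct exynos_drm_crtc *crtc)
{
        struct sketch_fimd_context *ctx = crtc->ctx;

        writel(1, ctx->regs);           /* made-up "enable" register write */
}

static struct exynos_drm_crtc_ops sketch_fimd_crtc_ops = {
        .commit = sketch_fimd_commit,
};

static int sketch_fimd_bind(struct drm_device *drm_dev,
                            struct sketch_fimd_context *ctx, int pipe)
{
        struct exynos_drm_crtc *crtc;

        crtc = exynos_drm_crtc_create(drm_dev, pipe, EXYNOS_DISPLAY_TYPE_LCD,
                                      &sketch_fimd_crtc_ops, ctx);
        return IS_ERR(crtc) ? PTR_ERR(crtc) : 0;
}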

drivers/gpu/drm/exynos/exynos_drm_crtc.h

@@ -17,14 +17,18 @@
#include "exynos_drm_drv.h"
int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
int pipe,
enum exynos_drm_output_type type,
struct exynos_drm_crtc_ops *ops,
void *context);
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
void exynos_drm_crtc_complete_scanout(struct drm_framebuffer *fb);
void exynos_drm_crtc_plane_mode_set(struct drm_crtc *crtc,
struct exynos_drm_overlay *overlay);
struct exynos_drm_plane *plane);
void exynos_drm_crtc_plane_commit(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_enable(struct drm_crtc *crtc, int zpos);
void exynos_drm_crtc_plane_disable(struct drm_crtc *crtc, int zpos);

drivers/gpu/drm/exynos/exynos_drm_dmabuf.c

@@ -279,7 +279,3 @@ err_buf_detach:
return ERR_PTR(ret);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");

drivers/gpu/drm/exynos/exynos_drm_dmabuf.h

@@ -12,14 +12,9 @@
#ifndef _EXYNOS_DRM_DMABUF_H_
#define _EXYNOS_DRM_DMABUF_H_
#ifdef CONFIG_DRM_EXYNOS_DMABUF
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
struct drm_gem_object *obj, int flags);
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct dma_buf *dma_buf);
#else
#define exynos_dmabuf_prime_export NULL
#define exynos_dmabuf_prime_import NULL
#endif
#endif
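
With the CONFIG_DRM_EXYNOS_DMABUF guard gone, the PRIME hooks are always
built and can be wired unconditionally into the drm_driver. A sketch of that
wiring, using the drm_driver field names of this kernel era (the full driver
definition is abridged):

static struct drm_driver sketch_exynos_drm_driver = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        .gem_prime_export       = exynos_dmabuf_prime_export,
        .gem_prime_import       = exynos_dmabuf_prime_import,
        /* remaining hooks omitted */
};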

drivers/gpu/drm/exynos/exynos_drm_drv.c

@@ -556,6 +556,9 @@ static struct platform_driver *const exynos_drm_kms_drivers[] = {
#ifdef CONFIG_DRM_EXYNOS_FIMD
&fimd_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS7_DECON
&decon_driver,
#endif
#ifdef CONFIG_DRM_EXYNOS_DP
&dp_driver,
#endif
@@ -612,6 +615,7 @@ static const char * const strings[] = {
"samsung,exynos3",
"samsung,exynos4",
"samsung,exynos5",
"samsung,exynos7",
};
static struct platform_driver exynos_drm_platform_driver = {

drivers/gpu/drm/exynos/exynos_drm_drv.h

@@ -23,6 +23,9 @@
#define MAX_FB_BUFFER 4
#define DEFAULT_ZPOS -1
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc, base)
#define to_exynos_plane(x) container_of(x, struct exynos_drm_plane, base)
/* This enumerates device type. */
enum exynos_drm_device_type {
EXYNOS_DEVICE_TYPE_NONE,
@@ -44,6 +47,7 @@ enum exynos_drm_output_type {
/*
* Exynos drm common overlay structure.
*
* @base: plane object
* @fb_x: offset x on a framebuffer to be displayed.
* - the unit is screen coordinates.
* @fb_y: offset y on a framebuffer to be displayed.
@@ -73,11 +77,14 @@ enum exynos_drm_output_type {
* @local_path: in case of lcd type, local path mode on or off.
* @transparency: transparency on or off.
* @activated: activated or not.
* @enabled: enabled or not.
*
* this structure is common to exynos SoC and its contents would be copied
* to hardware specific overlay info.
*/
struct exynos_drm_overlay {
struct exynos_drm_plane {
struct drm_plane base;
unsigned int fb_x;
unsigned int fb_y;
unsigned int fb_width;
@@ -104,6 +111,7 @@ struct exynos_drm_overlay {
bool local_path:1;
bool transparency:1;
bool activated:1;
bool enabled:1;
};
/*
@@ -155,11 +163,10 @@ struct exynos_drm_display {
};
/*
* Exynos drm manager ops
* Exynos drm crtc ops
*
* @dpms: control device power.
* @mode_fixup: fix mode data before applying it
* @mode_set: set the given mode to the manager
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -172,44 +179,49 @@ struct exynos_drm_display {
* @te_handler: trigger to transfer video image at the tearing effect
* synchronization signal if there is a page flip request.
*/
struct exynos_drm_manager;
struct exynos_drm_manager_ops {
void (*dpms)(struct exynos_drm_manager *mgr, int mode);
bool (*mode_fixup)(struct exynos_drm_manager *mgr,
struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*dpms)(struct exynos_drm_crtc *crtc, int mode);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void (*mode_set)(struct exynos_drm_manager *mgr,
const struct drm_display_mode *mode);
void (*commit)(struct exynos_drm_manager *mgr);
int (*enable_vblank)(struct exynos_drm_manager *mgr);
void (*disable_vblank)(struct exynos_drm_manager *mgr);
void (*wait_for_vblank)(struct exynos_drm_manager *mgr);
void (*win_mode_set)(struct exynos_drm_manager *mgr,
struct exynos_drm_overlay *overlay);
void (*win_commit)(struct exynos_drm_manager *mgr, int zpos);
void (*win_enable)(struct exynos_drm_manager *mgr, int zpos);
void (*win_disable)(struct exynos_drm_manager *mgr, int zpos);
void (*te_handler)(struct exynos_drm_manager *mgr);
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
void (*wait_for_vblank)(struct exynos_drm_crtc *crtc);
void (*win_mode_set)(struct exynos_drm_crtc *crtc,
struct exynos_drm_plane *plane);
void (*win_commit)(struct exynos_drm_crtc *crtc, int zpos);
void (*win_enable)(struct exynos_drm_crtc *crtc, int zpos);
void (*win_disable)(struct exynos_drm_crtc *crtc, int zpos);
void (*te_handler)(struct exynos_drm_crtc *crtc);
};
/*
* Exynos drm common manager structure, maps 1:1 with a crtc
* Exynos specific crtc structure.
*
* @list: the list entry for this manager
* @base: crtc object.
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
* @drm_dev: pointer to the drm device
* @crtc: crtc object.
* @pipe: the pipe number for this crtc/manager
* @pipe: a crtc index created at load() with a new crtc object creation
* and the crtc object would be set to private->crtc array
* to get a crtc object corresponding to this pipe from private->crtc
* array when irq interrupt occurred. the reason of using this pipe is that
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occurred through
* this pipe value.
* @dpms: store the crtc dpms value
* @ops: pointer to callbacks for exynos drm specific functionality
* @ctx: A pointer to the manager's implementation specific context
* @ctx: A pointer to the crtc's implementation specific context
*/
struct exynos_drm_manager {
struct list_head list;
enum exynos_drm_output_type type;
struct drm_device *drm_dev;
struct drm_crtc *crtc;
int pipe;
struct exynos_drm_manager_ops *ops;
struct exynos_drm_crtc {
struct drm_crtc base;
enum exynos_drm_output_type type;
unsigned int pipe;
unsigned int dpms;
wait_queue_head_t pending_flip_queue;
atomic_t pending_flip;
struct exynos_drm_crtc_ops *ops;
void *ctx;
};
struct exynos_drm_g2d_private {
@@ -246,7 +258,6 @@ struct exynos_drm_private {
*/
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
unsigned long da_start;
unsigned long da_space_size;
@@ -333,6 +344,7 @@ void exynos_drm_component_del(struct device *dev,
enum exynos_drm_device_type dev_type);
extern struct platform_driver fimd_driver;
extern struct platform_driver decon_driver;
extern struct platform_driver dp_driver;
extern struct platform_driver dsi_driver;
extern struct platform_driver mixer_driver;
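
The @pipe kerneldoc in struct exynos_drm_crtc above explains why the index
is kept: an interrupt handler only knows which hardware pipe fired, so the
crtc must be recovered through the private->crtc array. A sketch of that
lookup (the handler name is illustrative):

static void sketch_vblank_handler(struct drm_device *dev, int pipe)
{
        struct exynos_drm_private *priv = dev->dev_private;
        struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(priv->crtc[pipe]);

        if (exynos_crtc->dpms == DRM_MODE_DPMS_ON)
                drm_handle_vblank(dev, pipe);
}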

drivers/gpu/drm/exynos/exynos_drm_encoder.c

@@ -102,7 +102,7 @@ static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
/* all planes connected to this encoder should be also disabled. */
drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
if (plane->crtc == encoder->crtc)
if (plane->crtc && (plane->crtc == encoder->crtc))
plane->funcs->disable_plane(plane);
}
}

drivers/gpu/drm/exynos/exynos_drm_fbdev.c

@@ -79,9 +79,9 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct drm_framebuffer *fb)
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
struct exynos_drm_gem_buf *buffer;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned int nr_pages;
unsigned long offset;
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
@@ -94,25 +94,14 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
return -EFAULT;
}
/* map pages with kernel virtual space. */
if (!buffer->kvaddr) {
if (is_drm_iommu_supported(dev)) {
unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
nr_pages = buffer->size >> PAGE_SHIFT;
buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
} else {
phys_addr_t dma_addr = buffer->dma_addr;
if (dma_addr)
buffer->kvaddr = (void __iomem *)phys_to_virt(dma_addr);
else
buffer->kvaddr = (void __iomem *)NULL;
}
if (!buffer->kvaddr) {
DRM_ERROR("failed to map pages to kernel space.\n");
return -EIO;
}
buffer->kvaddr = (void __iomem *) vmap(buffer->pages,
nr_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!buffer->kvaddr) {
DRM_ERROR("failed to map pages to kernel space.\n");
return -EIO;
}
/* buffer count to framebuffer always is 1 at booting time. */
@@ -313,7 +302,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
struct drm_framebuffer *fb;
if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
if (exynos_gem_obj->buffer->kvaddr)
vunmap(exynos_gem_obj->buffer->kvaddr);
/* release drm framebuffer and real buffer */
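
The fbdev path above now always builds its kernel mapping with vmap()
instead of special-casing phys_to_virt() for the non-IOMMU case. A sketch of
the unified mapping (the helper name is illustrative; undo it with vunmap()):

#include <linux/vmalloc.h>

static void __iomem *sketch_fbdev_map(struct page **pages, size_t size)
{
        unsigned int nr_pages = size >> PAGE_SHIFT;

        /* write-combined, matching the code above */
        return (void __iomem *)vmap(pages, nr_pages, VM_MAP,
                                    pgprot_writecombine(PAGE_KERNEL));
}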

Some files were not shown because too many files have changed in this diff.