
main drm pull request for v4.15

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJaCm8RAAoJEAx081l5xIa+zX0QAJSm31kCG3vdw2CNiRx25L3q
 3hcsEOgAjVJ9FQVGKFWjzb8TK35tSqtNx5kWIj0VGaIfBE5Bdg5SLLgKKUYas8rY
 4LaphqICq2uxu2BNa2tpiar/sHhAnuozwQ4czpVWXzlaISnb9yYzRl7gMuyUVGkx
 +Gih5VUhLmQC0HsRTLJ3vaZQoUsLAl2gAjKcWa1bx57j2S+iKOPfsLaq7VYo+y1I
 Njc+iSGqMhJzRLXVkxL2lQKaslp7R38Bbh5K4Kvyjkm4Aq7zErOF6irpOXKMcrGl
 mwnr89vf1G9thjikrBaXpKnuvdbWYveoN/ORMlTdCfxkFnChHLnm3bd7NJ49RXDN
 Hv/Iq9YYjmZ9GTatxnx7lWtmXnZXC5he1yn1JAuz/yt7/0b/Wx+Mu/wEpBXYNFTd
 1AZdD586i+AmPo3yDkqH9nBu8JC0W0AnS9VZma4LVvZOP2UfJmj5Im1CLHItbGDN
 FnUCkwyD/lJUUk+WgT+w/GOMJgmFHDiFFl4tFtYVVjrUirpCFVguSKG9xuv6tT8P
 8iRsoP7RrcmDN9ojN2SEHwcpsAv3HnKkDv+9+GIbWnrGsSbCPq8Qm+JDSvf4h22I
 K5lwNpJrcpSKI+q10L7w2xliTBwb98sJkWGA/rssomrdBOWteGZAyqFRYAVgQ+mJ
 x/nJurIqQYh2KQN9+uLG
 =xVV2
 -----END PGP SIGNATURE-----

Merge tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux

Pull drm updates from Dave Airlie:
 "This is the main drm pull request for v4.15.

  Core:
   - Atomic object lifetime fixes
   - Atomic iterator improvements
   - Sparse/smatch fixes
   - Legacy kms ioctls to be interruptible
   - EDID override improvements
   - fb/gem helper cleanups
   - Simple outreachy patches
   - Documentation improvements
   - Fix dma-buf rcu races
   - DRM mode object leasing for improving VR use cases.
   - vgaarb improvements for non-x86 platforms.

  New driver:
   - tve200: Faraday Technology TVE200 block.

     This "TV Encoder" encodes a ITU-T BT.656 stream and can be found in
     the StorLink SL3516 (later Cortina Systems CS3516) as well as the
     Grain Media GM8180.

  New bridges:
   - SiI9234 support

  New panels:
   - S6E63J0X03, OTM8009A, Seiko 43WVF1G, 7" rpi touch panel, Toshiba
     LT089AC19000, Innolux AT043TN24

  i915:
   - Remove Coffeelake from alpha support
   - Cannonlake workarounds
   - Infoframe refactoring for DisplayPort
   - VBT updates
   - DisplayPort vswing/emph/buffer translation refactoring
   - CCS fixes
   - Restore GPU clock boost on missed vblanks
   - Scatter list updates for userptr allocations
   - Gen9+ transition watermarks
   - Display IPC (Isochronous Priority Control)
   - Private PAT management
   - GVT: improved error handling and pci config sanitizing
   - Execlist refactoring
   - Transparent Huge Page support
   - User defined priorities support
   - HuC/GuC firmware refactoring
   - DP MST fixes
   - eDP power sequencing fixes
   - Use RCU instead of stop_machine
   - PSR state tracking support
   - Eviction fixes
   - BDW DP aux channel timeout fixes
   - LSPCON fixes
   - Cannonlake PLL fixes

  amdgpu:
   - Per VM BO support
   - Powerplay cleanups
   - CI powerplay support
   - PASID mgr for kfd
   - SR-IOV fixes
   - initial GPU reset for vega10
   - Prime mmap support
   - TTM updates
   - Clock query interface for Raven
   - Fence to handle ioctl
   - UVD encode ring support on Polaris
   - Transparent huge page DMA support
   - Compute LRU pipe tweaks
   - BO flag to allow buffers to opt out of implicit sync
   - CTX priority setting API
   - VRAM lost infrastructure plumbing

  qxl:
   - fix flicker since atomic rework

  amdkfd:
   - Further improvements from internal AMD tree
   - Usermode events
   - Drop radeon support

  nouveau:
   - Pascal temperature sensor support
   - Improved BAR2 handling
   - MMU rework to support Pascal MMU

  exynos:
   - Improved HDMI/mixer support
   - HDMI audio interface support

  tegra:
   - Prep work for tegra186
   - Cleanup/fixes

  msm:
   - Preemption support for a5xx
   - Display fixes for 8x96 (snapdragon 820)
   - Async cursor plane fixes
   - FW loading rework
   - GPU debugging improvements

  vc4:
   - Prep for DSI panels
   - fix T-format tiling scanout
   - New madvise ioctl

  Rockchip:
   - LVDS support

  omapdrm:
   - omap4 HDMI CEC support

  etnaviv:
   - GPU performance counters groundwork

  sun4i:
   - refactor driver load + TCON backend
   - HDMI improvements
   - A31 support
   - Misc fixes

  udl:
   - Probe/EDID read fixes.

  tilcdc:
   - Misc fixes.

  pl111:
   - Support more variants

  adv7511:
   - Improve EDID handling.
   - HDMI CEC support

  sii8620:
   - Add remote control support"

* tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux: (1480 commits)
  drm/rockchip: analogix_dp: Use mutex rather than spinlock
  drm/mode_object: fix documentation for object lookups.
  drm/i915: Reorder context-close to avoid calling i915_vma_close() under RCU
  drm/i915: Move init_clock_gating() back to where it was
  drm/i915: Prune the reservation shared fence array
  drm/i915: Idle the GPU before shrinking everything
  drm/i915: Lock llist_del_first() vs llist_del_all()
  drm/i915: Calculate ironlake intermediate watermarks correctly, v2.
  drm/i915: Disable lazy PPGTT page table optimization for vGPU
  drm/i915/execlists: Remove the priority "optimisation"
  drm/i915: Filter out spurious execlists context-switch interrupts
  drm/amdgpu: use irq-safe lock for kiq->ring_lock
  drm/amdgpu: bypass lru touch for KIQ ring submission
  drm/amdgpu: Potential uninitialized variable in amdgpu_vm_update_directories()
  drm/amdgpu: potential uninitialized variable in amdgpu_vce_ring_parse_cs()
  drm/amd/powerplay: initialize a variable before using it
  drm/amd/powerplay: suppress KASAN out of bounds warning in vega10_populate_all_memory_levels
  drm/amd/amdgpu: fix evicted VRAM bo adjudgement condition
  drm/vblank: Tune drm_crtc_accurate_vblank_count() WARN down to a debug
  drm/rockchip: add CONFIG_OF dependency for lvds
  ...
Linus Torvalds 2017-11-15 20:42:10 -08:00
commit e60e1ee606
1040 changed files with 65899 additions and 56430 deletions


@ -857,7 +857,7 @@
The filter can be disabled or changed to another
driver later using sysfs.
drm_kms_helper.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
Broken monitors, graphic adapters, KVMs and EDIDless
panels may send no or incorrect EDID data sets.
This parameter allows specifying an EDID data set
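For example, to force one of the kernel's built-in EDID blobs on a single
connector (the connector name HDMI-A-1 is illustrative; actual names vary
by driver):
	drm.edid_firmware=HDMI-A-1:edid/1920x1080.bin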


@ -68,6 +68,8 @@ Optional properties:
- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
generator. The chip will rely on the sync signals in the DSI data lanes,
rather than generate its own timings for HDMI output.
- clocks: from common clock binding: reference to the CEC clock.
- clock-names: from common clock binding: must be "cec".
Required nodes:
@ -89,6 +91,8 @@ Example
reg = <39>;
interrupt-parent = <&gpio3>;
interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
clocks = <&cec_clock>;
clock-names = "cec";
adi,input-depth = <8>;
adi,input-colorspace = "rgb";


@ -0,0 +1,49 @@
Silicon Image SiI9234 HDMI/MHL bridge bindings
Required properties:
- compatible : "sil,sii9234".
- reg : I2C address for TPI interface, use 0x39
- avcc33-supply : MHL/USB Switch Supply Voltage (3.3V)
- iovcc18-supply : I/O Supply Voltage (1.8V)
- avcc12-supply : TMDS Analog Supply Voltage (1.2V)
- cvcc12-supply : Digital Core Supply Voltage (1.2V)
- interrupts, interrupt-parent: interrupt specifier of INT pin
- reset-gpios: gpio specifier of RESET pin (active low)
- video interfaces: Device node can contain two video interface port
nodes for HDMI encoder and connector according to [1].
- port@0 - MHL to HDMI
- port@1 - MHL to connector
[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
Example:
sii9234@39 {
compatible = "sil,sii9234";
reg = <0x39>;
avcc33-supply = <&vcc33mhl>;
iovcc18-supply = <&vcc18mhl>;
avcc12-supply = <&vsil12>;
cvcc12-supply = <&vsil12>;
reset-gpios = <&gpf3 4 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpf3>;
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
mhl_to_hdmi: endpoint {
remote-endpoint = <&hdmi_to_mhl>;
};
};
port@1 {
reg = <1>;
mhl_to_connector: endpoint {
remote-endpoint = <&connector_to_mhl>;
};
};
};
};


@ -0,0 +1,54 @@
* Faraday TV Encoder TVE200
Required properties:
- compatible: must be one of:
"faraday,tve200"
"cortina,gemini-tvc", "faraday,tve200"
- reg: base address and size of the control registers block
- interrupts: contains an interrupt specifier for the interrupt
line from the TVE200
- clock-names: should contain "PCLK" for the clock line clocking the
silicon and "TVE" for the 27MHz clock to the video driver
- clocks: contains phandle and clock specifier pairs for the entries
in the clock-names property. See
Documentation/devicetree/bindings/clock/clock-bindings.txt
Optional properties:
- resets: contains the reset line phandle for the block
Required sub-nodes:
- port: describes LCD panel signals, following the common binding
for video transmitter interfaces; see
Documentation/devicetree/bindings/media/video-interfaces.txt
This port should have the properties:
reg = <0>;
It should have one endpoint connected to a remote endpoint where
the display is connected.
Example:
display-controller@6a000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "faraday,tve200";
reg = <0x6a000000 0x1000>;
interrupts = <13 IRQ_TYPE_EDGE_RISING>;
resets = <&syscon GEMINI_RESET_TVC>;
clocks = <&syscon GEMINI_CLK_GATE_TVC>,
<&syscon GEMINI_CLK_TVC>;
clock-names = "PCLK", "TVE";
port@0 {
reg = <0>;
display_out: endpoint {
remote-endpoint = <&panel_in>;
};
};
};


@ -13,16 +13,16 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks.
- clock-names: the following clocks are required:
* "mdp_core_clk"
* "iface_clk"
* "bus_clk"
* "core_mmss_clk"
* "byte_clk"
* "pixel_clk"
* "core_clk"
* "mdp_core"
* "iface"
* "bus"
* "core_mmss"
* "byte"
* "pixel"
* "core"
For DSIv2, we need an additional clock:
* "src_clk"
- assigned-clocks: Parents of "byte_clk" and "pixel_clk" for the given platform.
* "src"
- assigned-clocks: Parents of "byte" and "pixel" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings.
- vdd-supply: phandle to vdd regulator device node
@ -101,7 +101,7 @@ Required properties:
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface_clk"
* "iface"
- vddio-supply: phandle to vdd-io regulator device node
Optional properties:
@ -123,13 +123,13 @@ Example:
reg = <0xfd922800 0x200>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"bus_clk",
"byte_clk",
"core_clk",
"core_mmss_clk",
"iface_clk",
"mdp_core_clk",
"pixel_clk";
"bus",
"byte",
"core",
"core_mmss",
"iface",
"mdp_core",
"pixel";
clocks =
<&mmcc MDSS_AXI_CLK>,
<&mmcc MDSS_BYTE0_CLK>,
@ -207,7 +207,7 @@ Example:
reg = <0xfd922a00 0xd4>,
<0xfd922b00 0x2b0>,
<0xfd922d80 0x7b>;
clock-names = "iface_clk";
clock-names = "iface";
clocks = <&mmcc MDSS_AHB_CLK>;
#clock-cells = <1>;
vddio-supply = <&pma8084_l12>;


@ -12,11 +12,11 @@ Required properties:
- clocks: device clocks
See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
- clock-names: the following clocks are required:
* "core_clk"
* "iface_clk"
* "mdp_core_clk"
* "pixel_clk"
* "link_clk"
* "core"
* "iface"
* "mdp_core"
* "pixel"
* "link"
- #clock-cells: The value should be 1.
- vdda-supply: phandle to vdda regulator device node
- lvl-vdd-supply: phandle to regulator device node which is used to supply power
@ -41,11 +41,11 @@ Example:
interrupts = <12 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"core_clk",
"pixel_clk",
"iface_clk",
"link_clk",
"mdp_core_clk";
"core",
"pixel",
"iface",
"link",
"mdp_core";
clocks =
<&mmcc MDSS_EDPAUX_CLK>,
<&mmcc MDSS_EDPPIXEL_CLK>,


@ -64,9 +64,9 @@ Example:
interrupts = <GIC_SPI 79 0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names =
"core_clk",
"master_iface_clk",
"slave_iface_clk";
"core",
"master_iface",
"slave_iface";
clocks =
<&mmcc HDMI_APP_CLK>,
<&mmcc HDMI_M_AHB_CLK>,
@ -92,7 +92,7 @@ Example:
<0x4a00500 0x100>;
#phy-cells = <0>;
power-domains = <&mmcc MDSS_GDSC>;
clock-names = "slave_iface_clk";
clock-names = "slave_iface";
clocks = <&mmcc HDMI_S_AHB_CLK>;
core-vdda-supply = <&pm8921_hdmi_mvs>;
};


@ -22,16 +22,16 @@ Required properties:
Documentation/devicetree/bindings/power/power_domain.txt
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
* "iface_clk"
* "bus_clk"
* "vsync_clk"
* "iface"
* "bus"
* "vsync"
- #address-cells: number of address cells for the MDSS children. Should be 1.
- #size-cells: Should be 1.
- ranges: parent bus address space is the same as the child bus address space.
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
* "lut"
MDP5:
Required properties:
@ -45,10 +45,10 @@ Required properties:
through MDP block
- clocks: device clocks. See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required.
- * "bus_clk"
- * "iface_clk"
- * "core_clk"
- * "vsync_clk"
- * "bus"
- * "iface"
- * "core"
- * "vsync"
- ports: contains the list of output ports from MDP. These connect to interfaces
that are external to the MDP hardware, such as HDMI, DSI, EDP etc (LVDS is a
special case since it is a part of the MDP block itself).
@ -77,7 +77,7 @@ Required properties:
Optional properties:
- clock-names: the following clocks are optional:
* "lut_clk"
* "lut"
Example:
@ -95,9 +95,9 @@ Example:
clocks = <&gcc GCC_MDSS_AHB_CLK>,
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"vsync_clk"
clock-names = "iface",
"bus",
"vsync"
interrupts = <0 72 0>;
@ -120,10 +120,10 @@ Example:
<&gcc GCC_MDSS_AXI_CLK>,
<&gcc GCC_MDSS_MDP_CLK>,
<&gcc GCC_MDSS_VSYNC_CLK>;
clock-names = "iface_clk",
"bus_clk",
"core_clk",
"vsync_clk";
clock-names = "iface",
"bus",
"core",
"vsync";
ports {
#address-cells = <1>;


@ -0,0 +1,21 @@
Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
a MIPI-DSI video interface. Its backlight is managed through the DSI link.
Required properties:
- compatible: "orisetech,otm8009a"
- reg: the virtual channel number of a DSI peripheral
Optional properties:
- reset-gpios: a GPIO spec for the reset pin (active low).
Example:
&dsi {
...
panel@0 {
compatible = "orisetech,otm8009a";
reg = <0>;
reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
};
};


@ -0,0 +1,49 @@
This binding covers the official 7" (800x480) Raspberry Pi touchscreen
panel.
This DSI panel contains:
- TC358762 DSI->DPI bridge
- Atmel microcontroller on I2C for power sequencing the DSI bridge and
controlling backlight
- Touchscreen controller on I2C for touch input
and this binding covers the DSI display parts but not its touch input.
Required properties:
- compatible: Must be "raspberrypi,7inch-touchscreen-panel"
- reg: Must be "45"
- port: See panel-common.txt
Example:
dsi1: dsi@7e700000 {
#address-cells = <1>;
#size-cells = <0>;
<...>
port {
dsi_out_port: endpoint {
remote-endpoint = <&panel_dsi_port>;
};
};
};
i2c_dsi: i2c {
compatible = "i2c-gpio";
#address-cells = <1>;
#size-cells = <0>;
gpios = <&gpio 28 0
&gpio 29 0>;
lcd@45 {
compatible = "raspberrypi,7inch-touchscreen-panel";
reg = <0x45>;
port {
panel_dsi_port: endpoint {
remote-endpoint = <&dsi_out_port>;
};
};
};
};


@ -0,0 +1,24 @@
Samsung S6E63J0X03 1.63" 320x320 AMOLED panel (interface: MIPI-DSI command mode)
Required properties:
- compatible: "samsung,s6e63j0x03"
- reg: the virtual channel number of a DSI peripheral
- vdd3-supply: I/O voltage supply
- vci-supply: voltage supply for analog circuits
- reset-gpios: a GPIO spec for the reset pin (active low)
- te-gpios: a GPIO spec for the tearing effect synchronization signal
gpio pin (active high)
Example:
&dsi {
...
panel@0 {
compatible = "samsung,s6e63j0x03";
reg = <0>;
vdd3-supply = <&ldo16_reg>;
vci-supply = <&ldo20_reg>;
reset-gpios = <&gpe0 1 GPIO_ACTIVE_LOW>;
te-gpios = <&gpx0 6 GPIO_ACTIVE_HIGH>;
};
};


@ -0,0 +1,23 @@
Seiko Instruments Inc. 4.3" WVGA (800 x RGB x 480) TFT with Touch-Panel
Required properties:
- compatible: should be "sii,43wvf1g".
- "dvdd-supply": 3v3 digital regulator.
- "avdd-supply": 5v analog regulator.
Optional properties:
- backlight: phandle for the backlight control.
Example:
panel {
compatible = "sii,43wvf1g";
backlight = <&backlight_display>;
dvdd-supply = <&reg_lcd_3v3>;
avdd-supply = <&reg_lcd_5v>;
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};


@ -0,0 +1,8 @@
Toshiba 8.9" WXGA (1280x768) TFT LCD panel
Required properties:
- compatible: should be "toshiba,lt089ac29000.txt"
- power-supply: as specified in the base binding
This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.


@ -0,0 +1,99 @@
Rockchip RK3288 LVDS interface
================================
Required properties:
- compatible: matching the soc type, one of
- "rockchip,rk3288-lvds";
- reg: physical base address of the controller and length
of memory mapped region.
- clocks: must include clock specifiers corresponding to entries in the
clock-names property.
- clock-names: must contain "pclk_lvds"
- avdd1v0-supply: regulator phandle for 1.0V analog power
- avdd1v8-supply: regulator phandle for 1.8V analog power
- avdd3v3-supply: regulator phandle for 3.3V analog power
- rockchip,grf: phandle to the general register files syscon
- rockchip,output: "rgb", "lvds" or "duallvds". This describes the output interface.
Optional properties:
- pinctrl-names: must contain a "lcdc" entry.
- pinctrl-0: pin control group to be used for this controller.
Required nodes:
The lvds has two video ports as described by
Documentation/devicetree/bindings/media/video-interfaces.txt
Their connections are modeled using the OF graph bindings specified in
Documentation/devicetree/bindings/graph.txt.
- video port 0 for the VOP input, the remote endpoint may be vopb or vopl
- video port 1 for either a panel or a subsequent encoder; an attached
LVDS panel is described by
Documentation/devicetree/bindings/display/panel/simple-panel.txt
Panel required properties:
- ports for remote LVDS output
Panel optional properties:
- data-mapping: should be "vesa-24", "jeida-24" or "jeida-18".
This property is described in:
Documentation/devicetree/bindings/display/panel/panel-lvds.txt
Example:
lvds_panel: lvds-panel {
compatible = "auo,b101ean01";
enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>;
data-mapping = "jeida-24";
ports {
panel_in_lvds: endpoint {
remote-endpoint = <&lvds_out_panel>;
};
};
};
For Rockchip RK3288:
lvds: lvds@ff96c000 {
compatible = "rockchip,rk3288-lvds";
rockchip,grf = <&grf>;
reg = <0xff96c000 0x4000>;
clocks = <&cru PCLK_LVDS_PHY>;
clock-names = "pclk_lvds";
pinctrl-names = "lcdc";
pinctrl-0 = <&lcdc_ctl>;
avdd1v0-supply = <&vdd10_lcd>;
avdd1v8-supply = <&vcc18_lcd>;
avdd3v3-supply = <&vcca_33>;
rockchip,output = "rgb";
ports {
#address-cells = <1>;
#size-cells = <0>;
lvds_in: port@0 {
reg = <0>;
lvds_in_vopb: endpoint@0 {
reg = <0>;
remote-endpoint = <&vopb_out_lvds>;
};
lvds_in_vopl: endpoint@1 {
reg = <1>;
remote-endpoint = <&vopl_out_lvds>;
};
};
lvds_out: port@1 {
reg = <1>;
lvds_out_panel: endpoint {
remote-endpoint = <&panel_in_lvds>;
};
};
};
};


@ -40,15 +40,19 @@ CEC. It is one end of the pipeline.
Required properties:
- compatible: value must be one of:
* allwinner,sun4i-a10-hdmi
* allwinner,sun5i-a10s-hdmi
* allwinner,sun6i-a31-hdmi
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the HDMI encoder
* ahb: the HDMI interface clock
* mod: the HDMI module clock
* ddc: the HDMI ddc clock (A31 only)
* pll-0: the first video PLL
* pll-1: the second video PLL
- clock-names: the clock names mentioned above
- resets: phandle to the reset control for the HDMI encoder (A31 only)
- dmas: phandles to the DMA channels used by the HDMI encoder
* ddc-tx: The channel for DDC transmission
* ddc-rx: The channel for DDC reception
@ -83,9 +87,11 @@ The TCON acts as a timing controller for RGB, LVDS and TV interfaces.
Required properties:
- compatible: value must be either:
* allwinner,sun4i-a10-tcon
* allwinner,sun5i-a13-tcon
* allwinner,sun6i-a31-tcon
* allwinner,sun6i-a31s-tcon
* allwinner,sun7i-a20-tcon
* allwinner,sun8i-a33-tcon
* allwinner,sun8i-v3s-tcon
- reg: base address and size of memory-mapped region
@ -150,8 +156,10 @@ system.
Required properties:
- compatible: value must be one of:
* allwinner,sun4i-a10-display-backend
* allwinner,sun5i-a13-display-backend
* allwinner,sun6i-a31-display-backend
* allwinner,sun7i-a20-display-backend
* allwinner,sun8i-a33-display-backend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@ -182,8 +190,10 @@ deinterlacing and color space conversion.
Required properties:
- compatible: value must be one of:
* allwinner,sun4i-a10-display-frontend
* allwinner,sun5i-a13-display-frontend
* allwinner,sun6i-a31-display-frontend
* allwinner,sun7i-a20-display-frontend
* allwinner,sun8i-a33-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
@ -228,10 +238,12 @@ extra node.
Required properties:
- compatible: value must be one of:
* allwinner,sun4i-a10-display-engine
* allwinner,sun5i-a10s-display-engine
* allwinner,sun5i-a13-display-engine
* allwinner,sun6i-a31-display-engine
* allwinner,sun6i-a31s-display-engine
* allwinner,sun7i-a20-display-engine
* allwinner,sun8i-a33-display-engine
* allwinner,sun8i-v3s-display-engine


@ -3,6 +3,10 @@ NVIDIA Tegra host1x
Required properties:
- compatible: "nvidia,tegra<chip>-host1x"
- reg: Physical base address and length of the controller's registers.
For pre-Tegra186, one entry describing the whole register area.
For Tegra186, one entry for each entry in reg-names:
"vm" - VM region assigned to Linux
"hypervisor" - Hypervisor region (only if Linux acts as hypervisor)
- interrupts: The interrupt outputs from the controller.
- #address-cells: The number of cells used to represent physical base addresses
in the host1x address space. Should be 1.


@ -254,6 +254,7 @@ opencores OpenCores.org
openrisc OpenRISC.io
option Option NV
ORCL Oracle Corporation
orisetech Orise Technology
ortustech Ortus Technology Co., Ltd.
ovti OmniVision Technologies
oxsemi Oxford Semiconductor, Ltd.


@ -168,6 +168,61 @@ IOCTL Support on Device Nodes
.. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
:doc: driver specific ioctls
Recommended IOCTL Return Values
-------------------------------
In theory a driver's IOCTL callback is only allowed to return very few error
codes. In practice it's good to abuse a few more. This section documents common
practice within the DRM subsystem:
ENOENT:
Strictly this should only be used when a file doesn't exist, e.g. when
calling the open() syscall. We reuse that to signal any kind of object
lookup failure, e.g. for unknown GEM buffer object handles, unknown KMS
object handles and similar cases.
ENOSPC:
Some drivers use this to differentiate "out of kernel memory" from "out
of VRAM". Sometimes also applies to other limited gpu resources used for
rendering (e.g. when you have a special limited compression buffer).
Sometimes resource allocation/reservation issues in command submission
IOCTLs are also signalled through EDEADLK.
Simply running out of kernel/system memory is signalled through ENOMEM.
EPERM/EACCES:
Returned for an operation that is valid, but needs more privileges.
E.g. root-only or much more common, DRM master-only operations return
this when called by unprivileged clients. There's no clear
difference between EACCES and EPERM.
ENODEV:
Feature (like PRIME, modesetting, GEM) is not supported by the driver.
ENXIO:
Remote failure, e.g. of a hardware transaction (like i2c), but also used
when the exporting driver of a shared dma-buf or fence doesn't support a
feature the importer needs.
EINTR:
DRM drivers assume that userspace restarts all IOCTLs. Any DRM IOCTL can
return EINTR and in such a case should be restarted with the IOCTL
parameters left unchanged.
EIO:
The GPU died and couldn't be resurrected through a reset. Modesetting
hardware failures are signalled through the "link status" connector
property.
EINVAL:
Catch-all for anything that is an invalid argument combination which
cannot work.
IOCTLs also use other error codes like ETIME, EFAULT, EBUSY and ENOTTY, but
their usage is in line with their common meanings. The above list only tries to
document DRM-specific patterns. Note that ENOTTY has the slightly unintuitive
meaning of "this IOCTL does not exist", and is used exactly as such in DRM.
.. kernel-doc:: include/drm/drm_ioctl.h
:internal:


@ -15,6 +15,7 @@ Linux GPU Driver Developer's Guide
pl111
tegra
tinydrm
tve200
vc4
vga-switcheroo
vgaarbiter


@ -75,17 +75,6 @@ helpers.
Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
Implement deferred fbdev setup in the helper
--------------------------------------------
Many (especially embedded drivers) want to delay fbdev setup until there's a
real screen plugged in. This is to avoid the dreaded fallback to the low-res
fbdev default. Many drivers have a hacked-up (and often broken) version of this,
better to do it once in the shared helpers. Thierry has a patch series, but that
one needs to be rebased and final polish applied.
Contact: Thierry Reding, Daniel Vetter, driver maintainers
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@ -138,6 +127,8 @@ interfaces to fix these issues:
the acquire context explicitly on stack and then also pass it down into
drivers explicitly so that the legacy-on-atomic functions can use them.
Except for some driver code this is done.
* A bunch of the vtable hooks are now in the wrong place: DRM has a split
between core vfunc tables (named ``drm_foo_funcs``), which are used to
implement the userspace ABI. And then there's the optional hooks for the
@ -151,6 +142,8 @@ interfaces to fix these issues:
connector at runtime. That's almost all of them, and would allow us to get
rid of a lot of ``best_encoder`` boilerplate in drivers.
This was almost done, but new drivers added a few more cases again.
Contact: Daniel Vetter
Get rid of dev->struct_mutex from GEM drivers
@ -177,15 +170,20 @@ following drivers still use ``struct_mutex``: ``msm``, ``omapdrm`` and
Contact: Daniel Vetter, respective driver maintainers
Convert instances of dev_info/dev_err/dev_warn to their DRM_DEV_* equivalent
----------------------------------------------------------------------------
For drivers which could have multiple instances, it is necessary to
differentiate between which is which in the logs. Since DRM_INFO/WARN/ERROR
don't do this, drivers used dev_info/warn/err to make this differentiation. We
now have DRM_DEV_* variants of the drm print macros, so we can start to convert
those drivers back to using drm-formatted specific log messages.
Contact: Sean Paul, Maintainer of the driver you plan to convert
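
As a rough sketch of such a conversion (assuming the driver has its ``struct
drm_device`` at hand as ``drm``)::

	/* before: with multiple instances, no way to tell who logged this */
	DRM_INFO("vblank interrupts enabled\n");

	/* after: the DRM_DEV_* variant tags the message with the device */
	DRM_DEV_INFO(drm->dev, "vblank interrupts enabled\n");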
Core refactorings
=================
Use new IDR deletion interface to clean up drm_gem_handle_delete()
------------------------------------------------------------------
See the "This is gross" comment -- apparently the IDR system now can return an
error code instead of oopsing.
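
A sketch of the intended cleanup, assuming the interface meant here is
``idr_remove()`` now returning the removed entry (NULL when the handle was
never allocated) instead of requiring a separate lookup::

	spin_lock(&filp->table_lock);
	obj = idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);
	if (!obj)
		return -EINVAL;	/* bad handle: report it instead of oopsing */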
Clean up the DRM header mess
----------------------------
@ -306,6 +304,18 @@ There's a bunch of issues with it:
Contact: Daniel Vetter
KMS cleanups
------------
Some of these date from the very introduction of KMS in 2008 ...
- drm_mode_config.crtc_idr is misnamed, since it contains all KMS objects. Should
be renamed to drm_mode_config.object_idr.
- drm_display_mode doesn't need to be derived from drm_mode_object. That's a
leftover from older (never merged into upstream) KMS designs where modes
were set using their ID, including support to add/remove modes.
Better Testing
==============
@ -353,7 +363,16 @@ those drivers as simple as possible, so lots of room for refactoring:
- backlight helpers, probably best to put them into a new drm_backlight.c.
This is because drivers/video is de-facto unmaintained. We could also
move drivers/video/backlight to drivers/gpu/backlight and take it all
over within drm-misc, but that's more work.
over within drm-misc, but that's more work. Backlight helpers require a fair
bit of reworking and refactoring. A simple example is the enabling of a backlight.
Tinydrm has helpers for this. It would be good if other drivers can also use the
helper. However, there are various cases we need to consider, i.e. different
drivers seem to have different ways of enabling/disabling a backlight.
We also need to consider the backlight drivers (like gpio_backlight). The situation
is further complicated by the fact that the backlight is tied to fbdev
via fb_notifier_callback() which has complicated logic. For further details, refer
to the following discussion thread:
https://groups.google.com/forum/#!topic/outreachy-kernel/8rBe30lwtdA
- spi helpers, probably best put into spi core/helper code. Thierry said
the spi maintainer is fast&reactive, so shouldn't be a big issue.


@ -0,0 +1,6 @@
==================================
drm/tve200 Faraday TV Encoder 200
==================================
.. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
:doc: Faraday TV Encoder 200


@ -754,8 +754,6 @@ F: drivers/gpu/drm/amd/amdkfd/
F: drivers/gpu/drm/amd/include/cik_structs.h
F: drivers/gpu/drm/amd/include/kgd_kfd_interface.h
F: drivers/gpu/drm/amd/include/vi_structs.h
F: drivers/gpu/drm/radeon/radeon_kfd.c
F: drivers/gpu/drm/radeon/radeon_kfd.h
F: include/uapi/linux/kfd_ioctl.h
AMD SEATTLE DEVICE TREE SUPPORT
@ -4400,6 +4398,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/bochs/
DRM DRIVER FOR FARADAY TVE200 TV ENCODER
M: Linus Walleij <linus.walleij@linaro.org>
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tve200/
DRM DRIVER FOR INTEL I810 VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/i810/
@ -4543,7 +4547,7 @@ L: dri-devel@lists.freedesktop.org
S: Supported
F: drivers/gpu/drm/sun4i/
F: Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mripard/linux.git
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR AMLOGIC SOCS
M: Neil Armstrong <narmstrong@baylibre.com>
@ -4727,7 +4731,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
DRM PANEL DRIVERS
M: Thierry Reding <thierry.reding@gmail.com>
L: dri-devel@lists.freedesktop.org
T: git git://anongit.freedesktop.org/tegra/linux.git
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/drm_panel.c
F: drivers/gpu/drm/panel/
@ -5495,6 +5499,7 @@ F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
T: git git://github.com/bzolnier/linux.git
Q: http://patchwork.kernel.org/project/linux-fbdev/list/


@ -1740,15 +1740,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
static void fixup_vga(struct pci_dev *pdev)
{
u16 cmd;
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
vga_set_default_device(pdev);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);


@ -625,7 +625,7 @@ EXPORT_SYMBOL_GPL(dma_buf_detach);
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table = ERR_PTR(-EINVAL);
struct sg_table *sg_table;
might_sleep();


@ -266,8 +266,7 @@ EXPORT_SYMBOL(reservation_object_add_excl_fence);
* @dst: the destination reservation object
* @src: the source reservation object
*
* Copy all fences from src to dst. Both src->lock as well as dst-lock must be
* held.
 * Copy all fences from src to dst. dst->lock must be held.
*/
int reservation_object_copy_fences(struct reservation_object *dst,
struct reservation_object *src)
@ -277,33 +276,62 @@ int reservation_object_copy_fences(struct reservation_object *dst,
size_t size;
unsigned i;
src_list = reservation_object_get_list(src);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
retry:
if (src_list) {
size = offsetof(typeof(*src_list),
shared[src_list->shared_count]);
unsigned shared_count = src_list->shared_count;
size = offsetof(typeof(*src_list), shared[shared_count]);
rcu_read_unlock();
dst_list = kmalloc(size, GFP_KERNEL);
if (!dst_list)
return -ENOMEM;
dst_list->shared_count = src_list->shared_count;
dst_list->shared_max = src_list->shared_count;
for (i = 0; i < src_list->shared_count; ++i)
dst_list->shared[i] =
dma_fence_get(src_list->shared[i]);
rcu_read_lock();
src_list = rcu_dereference(src->fence);
if (!src_list || src_list->shared_count > shared_count) {
kfree(dst_list);
goto retry;
}
dst_list->shared_count = 0;
dst_list->shared_max = shared_count;
for (i = 0; i < src_list->shared_count; ++i) {
struct dma_fence *fence;
fence = rcu_dereference(src_list->shared[i]);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&fence->flags))
continue;
if (!dma_fence_get_rcu(fence)) {
kfree(dst_list);
src_list = rcu_dereference(src->fence);
goto retry;
}
if (dma_fence_is_signaled(fence)) {
dma_fence_put(fence);
continue;
}
dst_list->shared[dst_list->shared_count++] = fence;
}
} else {
dst_list = NULL;
}
new = dma_fence_get_rcu_safe(&src->fence_excl);
rcu_read_unlock();
kfree(dst->staged);
dst->staged = NULL;
src_list = reservation_object_get_list(dst);
old = reservation_object_get_excl(dst);
new = reservation_object_get_excl(src);
dma_fence_get(new);
preempt_disable();
write_seqcount_begin(&dst->seq);
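
The retry logic above is an instance of the usual RCU "get or reload" idiom
for refcounted objects: a fence pointer read under rcu_read_lock() may already
have dropped its last reference, so the reference must be taken with
dma_fence_get_rcu(), and the read restarted when that fails. A condensed
sketch of the bare pattern (dma_fence_get_rcu_safe(), used above for the
exclusive fence, wraps the same loop and additionally re-checks that the
pointer did not change underneath):

	rcu_read_lock();
	do {
		fence = rcu_dereference(src->fence_excl);
		/* dma_fence_get_rcu() returns NULL when the refcount
		 * already hit zero; reload the pointer and retry */
	} while (fence && !dma_fence_get_rcu(fence));
	rcu_read_unlock();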


@ -321,8 +321,16 @@ static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
struct sync_timeline *obj = file->private_data;
struct sync_pt *pt, *next;
smp_wmb();
spin_lock_irq(&obj->lock);
list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
dma_fence_set_error(&pt->base, -ENOENT);
dma_fence_signal_locked(&pt->base);
}
spin_unlock_irq(&obj->lock);
sync_timeline_put(obj);
return 0;


@ -110,7 +110,7 @@ config DRM_FBDEV_OVERALLOC
config DRM_LOAD_EDID_FIRMWARE
bool "Allow to specify an EDID data set instead of probing for it"
depends on DRM_KMS_HELPER
depends on DRM
help
Say Y here, if you want to use EDID data to be loaded from the
/lib/firmware directory or one of the provided built-in
@ -184,6 +184,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@ -191,6 +192,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
source "drivers/gpu/drm/amd/lib/Kconfig"
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@ -278,6 +281,8 @@ source "drivers/gpu/drm/tinydrm/Kconfig"
source "drivers/gpu/drm/pl111/Kconfig"
source "drivers/gpu/drm/tve200/Kconfig"
# Keep legacy drivers last
menuconfig DRM_LEGACY


@ -18,7 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o
drm_syncobj.o drm_lease.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
@ -29,6 +29,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
@ -37,7 +38,6 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
@ -45,14 +45,13 @@ drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/
CFLAGS_drm_trace_points.o := -I$(src)
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@ -101,3 +100,4 @@ obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/


@ -26,7 +26,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_queue_mgr.o amdgpu_vf_error.o
amdgpu_queue_mgr.o amdgpu_vf_error.o amdgpu_sched.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
@ -134,5 +134,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
CFLAGS_amdgpu_trace_points.o := -I$(src)


@ -65,6 +65,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@ -91,7 +92,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern unsigned amdgpu_sdma_phase_quantum;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
@ -120,6 +121,7 @@ extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@ -178,6 +180,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@ -292,14 +295,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
/* number of dw to reserve per operation */
unsigned copy_pte_num_dw;
/* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
/* maximum nums of PTEs/PDEs in a single operation */
uint32_t set_max_nums_pte_pde;
/* number of dw to reserve per operation */
unsigned set_pte_pde_num_dw;
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@ -332,6 +346,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
@ -399,6 +414,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
@ -455,9 +471,10 @@ struct amdgpu_sa_bo {
*/
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, bool kernel,
struct drm_gem_object **obj);
int alignment, u32 initial_domain,
u64 flags, bool kernel,
struct reservation_object *resv,
struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@ -715,10 +732,14 @@ struct amdgpu_ctx {
struct amdgpu_device *adev;
struct amdgpu_queue_mgr queue_mgr;
unsigned reset_counter;
uint32_t vram_lost_counter;
spinlock_t ring_lock;
struct dma_fence **fences;
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
bool preamble_presented;
bool preamble_presented;
enum amd_sched_priority init_priority;
enum amd_sched_priority override_priority;
struct mutex lock;
};
struct amdgpu_ctx_mgr {
@ -731,17 +752,22 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence);
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum amd_sched_priority priority);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
/*
* file private structure
*/
@ -753,7 +779,6 @@ struct amdgpu_fpriv {
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
u32 vram_lost_counter;
};
/*
@ -854,7 +879,7 @@ struct amdgpu_mec {
struct amdgpu_kiq {
u64 eop_gpu_addr;
struct amdgpu_bo *eop_obj;
struct mutex ring_mutex;
spinlock_t ring_lock;
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
};
@ -1014,11 +1039,14 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
bool in_reset;
/* s3/s4 mask */
bool in_suspend;
/* NGG */
struct amdgpu_ngg ngg;
/* pipe reservation */
struct mutex pipe_reserve_mutex;
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@ -1056,6 +1084,7 @@ struct amdgpu_cs_parser {
/* buffer objects */
struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list;
struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct dma_fence *fence;
@ -1096,6 +1125,7 @@ struct amdgpu_job {
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t vram_lost_counter;
/* user fence handling */
uint64_t uf_addr;
@ -1121,7 +1151,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
#define AMDGPU_MAX_WB 512 /* Reserve at most 512 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@ -1183,6 +1213,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */
const struct firmware *gpu_info_fw;
void *fw_buf_ptr;
uint64_t fw_buf_mc;
};
/*
@ -1196,20 +1229,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
*/
void amdgpu_test_moves(struct amdgpu_device *adev);
/*
* MMU Notifier
*/
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
/*
* Debugfs
*/
@ -1305,6 +1324,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@ -1370,6 +1391,18 @@ struct amdgpu_atcs {
struct amdgpu_atcs_functions functions;
};
/*
* Firmware VRAM reservation
*/
struct amdgpu_fw_vram_usage {
u64 start_offset;
u64 size;
struct amdgpu_bo *reserved_bo;
void *va;
};
int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev);
/*
* CGS
*/
@ -1519,7 +1552,6 @@ struct amdgpu_device {
/* powerplay */
struct amd_powerplay powerplay;
bool pp_enabled;
bool pp_force_state_enabled;
/* dpm */
@ -1575,6 +1607,8 @@ struct amdgpu_device {
struct delayed_work late_init_work;
struct amdgpu_virt virt;
/* firmware VRAM reservation */
struct amdgpu_fw_vram_usage fw_vram_usage;
/* link all shadow bo */
struct list_head shadow_list;
@ -1592,6 +1626,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
bool in_sriov_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@ -1759,6 +1794,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@ -1791,18 +1827,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@ -1836,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
@ -1885,10 +1907,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo);
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h"
#endif


@ -35,41 +35,50 @@
#include "acp_gfx_if.h"
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_ON_MASK 0x03
#define ACP_TILE_OFF_MASK 0x02
#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_P1_MASK 0x3e
#define ACP_TILE_P2_MASK 0x3d
#define ACP_TILE_DSP0_MASK 0x3b
#define ACP_TILE_DSP1_MASK 0x37
#define ACP_TILE_DSP2_MASK 0x2f
#define ACP_TILE_DSP2_MASK 0x2f
#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c
#define ACP_DMA_REGS_END 0x146c0
#define ACP_I2S_PLAY_REGS_START 0x14840
#define ACP_I2S_PLAY_REGS_END 0x148b4
#define ACP_I2S_CAP_REGS_START 0x148b8
#define ACP_I2S_CAP_REGS_END 0x1496c
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
#define mmACP_PGFSM_READ_REG_0 0x51cc
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 3
#define ACP_SRC_ID 162
#define mmACP_CONTROL 0x5131
#define mmACP_STATUS 0x5133
#define mmACP_SOFT_RESET 0x5134
#define ACP_CONTROL__ClkEn_MASK 0x1
#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
#define ACP_DEVS 3
#define ACP_SRC_ID 162
enum {
ACP_TILE_P1 = 0,
@ -260,6 +269,8 @@ static int acp_hw_init(void *handle)
{
int r, i;
uint64_t acp_base;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata;
@ -402,6 +413,46 @@ static int acp_hw_init(void *handle)
}
}
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
(val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Enable clock to ACP and wait until the clock is enabled */
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
val = val | ACP_CONTROL__ClkEn_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
count = ACP_CLOCK_EN_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
if (val & (u32) 0x1)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Deassert the SOFT RESET flags */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
}
@ -414,6 +465,8 @@ static int acp_hw_init(void *handle)
static int acp_hw_fini(void *handle)
{
int i, ret;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -421,6 +474,42 @@ static int acp_hw_fini(void *handle)
if (!adev->acp.acp_cell)
return 0;
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val |= ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
(val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
/* Disable ACP clock */
val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
val &= ~ACP_CONTROL__ClkEn_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
count = ACP_CLOCK_EN_TIME_OUT_VALUE;
while (true) {
val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
if (val & (u32) 0x1)
break;
if (--count == 0) {
dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
return -ETIMEDOUT;
}
udelay(100);
}
if (adev->acp.acp_genpd) {
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);


@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
.alloc_pasid = amdgpu_vm_alloc_pasid,
.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@ -336,6 +338,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct cik_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
bool valid_wptr = false;
m = get_mqd(mqd);
@ -354,7 +357,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (read_user_wptr(mm, wptr, wptr_val))
/* read_user_ptr may take the mm->mmap_sem.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
release_queue(kgd);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(kgd, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);


@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
.alloc_pasid = amdgpu_vm_alloc_pasid,
.free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@ -290,6 +292,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
struct vi_mqd *m;
uint32_t *mqd_hqd;
uint32_t reg, wptr_val, data;
bool valid_wptr = false;
m = get_mqd(mqd);
@ -337,7 +340,14 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (read_user_wptr(mm, wptr, wptr_val))
/* read_user_wptr() may take mm->mmap_sem.
* Release srbm_mutex to avoid a circular lock dependency:
* srbm_mutex -> mmap_sem -> reservation_ww_class_mutex -> srbm_mutex.
*/
release_queue(kgd);
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
acquire_queue(kgd, pipe_id, queue_id);
if (valid_wptr)
WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);


@ -1766,34 +1766,32 @@ bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
return true;
}
/* Atom needs data in little endian format
* so swap as appropriate when copying data to
* or from atom. Note that atom operates on
* dw units.
/* Atom needs data in little endian format so swap as appropriate when copying
* data to or from atom. Note that atom operates on dw units.
*
* Use to_le=true when sending data to atom and provide at least
* ALIGN(num_bytes,4) bytes in the dst buffer.
*
* Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
* bytes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
u32 *dst32, *src32;
u32 src_tmp[5], dst_tmp[5];
int i;
u8 align_num_bytes = ALIGN(num_bytes, 4);
memcpy(src_tmp, src, num_bytes);
src32 = (u32 *)src_tmp;
dst32 = (u32 *)dst_tmp;
if (to_le) {
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = cpu_to_le32(src32[i]);
memcpy(dst, dst_tmp, num_bytes);
memcpy(src_tmp, src, num_bytes);
for (i = 0; i < align_num_bytes / 4; i++)
dst_tmp[i] = cpu_to_le32(src_tmp[i]);
memcpy(dst, dst_tmp, align_num_bytes);
} else {
u8 dws = num_bytes & ~3;
for (i = 0; i < ((num_bytes + 3) / 4); i++)
dst32[i] = le32_to_cpu(src32[i]);
memcpy(dst, dst_tmp, dws);
if (num_bytes % 4) {
for (i = 0; i < (num_bytes % 4); i++)
dst[dws+i] = dst_tmp[dws+i];
}
memcpy(src_tmp, src, align_num_bytes);
for (i = 0; i < align_num_bytes / 4; i++)
dst_tmp[i] = le32_to_cpu(src_tmp[i]);
memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
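With this rewrite both directions swap whole dwords through fixed 20-byte bounce buffers, and the caller must supply ALIGN(num_bytes, 4) bytes of buffer space as the updated comment states. A user-space analog of the to_le path, using glibc's htole32() in place of cpu_to_le32() (an assumption; the kernel helper is not available here):

#include <endian.h>   /* htole32(): glibc stand-in for cpu_to_le32() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(n) (((n) + 3U) & ~3U)

/* Copy num_bytes from src to dst, swapping per 32-bit dword on big-endian
 * hosts; dst must hold ALIGN4(num_bytes) bytes and, as with the driver's
 * 20-byte bounce buffers, num_bytes must not exceed 20. */
static void copy_swap_to_le(uint8_t *dst, const uint8_t *src, uint8_t num_bytes)
{
        uint32_t tmp[5] = { 0 };            /* tail dword zero-padded */
        unsigned int aligned = ALIGN4(num_bytes);
        unsigned int i;

        memcpy(tmp, src, num_bytes);
        for (i = 0; i < aligned / 4; i++)
                tmp[i] = htole32(tmp[i]);
        memcpy(dst, tmp, aligned);
}

int main(void)
{
        uint8_t in[6] = { 1, 2, 3, 4, 5, 6 }, out[8];

        copy_swap_to_le(out, in, sizeof(in));
        printf("%02x %02x\n", out[0], out[4]);
        return 0;
}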
@ -1807,6 +1805,8 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
u64 start_addr;
u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@ -1815,7 +1815,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware requests VRAM reservation for SR-IOV */
adev->fw_vram_usage.start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->fw_vram_usage.size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
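The new branch keys off flag bits packed into the high end of the firmware start address: when the operation field says SR-IOV shared reservation, the remaining bits are a kilobyte offset (hence the << 10). A small sketch of the decode; the *_SHIFT/*_MASK values below are made-up placeholders for the real atombios constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the ATOM_VRAM_* constants. */
#define OP_FLAGS_SHIFT 30
#define OP_FLAGS_MASK  (0x3U << OP_FLAGS_SHIFT)
#define SRIOV_MSG_SHARE_RESERVATION 0x1U

/* Decode the packed start address: the high bits carry an operation code,
 * the rest is a kilobyte offset. */
static void decode_fw_vram(uint32_t start_addr, uint32_t size_kb)
{
        if ((start_addr & OP_FLAGS_MASK) ==
            (SRIOV_MSG_SHARE_RESERVATION << OP_FLAGS_SHIFT)) {
                uint64_t offset = (uint64_t)(start_addr & ~OP_FLAGS_MASK) << 10;
                uint64_t bytes  = (uint64_t)size_kb << 10;

                printf("SR-IOV reservation: offset=%llu size=%llu\n",
                       (unsigned long long)offset, (unsigned long long)bytes);
        }
}

int main(void)
{
        decode_fw_vram((SRIOV_MSG_SHARE_RESERVATION << OP_FLAGS_SHIFT) | 256, 64);
        return 0;
}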


@ -71,19 +71,33 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
struct vram_usagebyfirmware_v2_1 * firmware_usage;
uint32_t start_addr, size;
uint16_t data_offset;
int usage_bytes = 0;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
struct vram_usagebyfirmware_v2_1 *firmware_usage =
(struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
le32_to_cpu(firmware_usage->start_address_in_kb),
le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
le16_to_cpu(firmware_usage->used_by_driver_in_kb));
usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);
if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
/* Firmware requests VRAM reservation for SR-IOV */
adev->fw_vram_usage.start_offset = (start_addr &
(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
adev->fw_vram_usage.size = size << 10;
/* Use the default scratch size */
usage_bytes = 0;
} else {
usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
}
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)


@ -42,10 +42,31 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
static void *amdgpu_cgs_register_pp_handle(struct cgs_device *cgs_device,
int (*call_back_func)(struct amd_pp_init *, void **))
{
CGS_FUNC_ADEV;
struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;
if (call_back_func == NULL)
return NULL;
amd_pp = &(adev->powerplay);
pp_init.chip_family = adev->family;
pp_init.chip_id = adev->asic_type;
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
pp_init.feature_mask = amdgpu_pp_feature_mask;
pp_init.device = cgs_device;
if (call_back_func(&pp_init, &(amd_pp->pp_handle)))
return NULL;
return adev->powerplay.pp_handle;
}
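amdgpu_cgs_register_pp_handle() inverts the usual dependency: powerplay hands amdgpu a creation callback, amdgpu fills in the init parameters, and keeps whatever opaque handle the callback returns. A compact sketch of that registration shape (all names here are illustrative):

#include <stdio.h>

struct pp_init { int chip_family; };

static void *pp_handle;

/* Caller supplies the creation callback; we fill the init struct and
 * retain the handle it produces, as the CGS helper above does. */
static void *register_pp(struct pp_init *init,
                         int (*create)(struct pp_init *, void **))
{
        if (!create || create(init, &pp_handle))
                return NULL;
        return pp_handle;
}

static int fake_create(struct pp_init *init, void **out)
{
        static int dummy;

        (void)init;
        *out = &dummy;
        return 0;
}

int main(void)
{
        struct pp_init init = { .chip_family = 1 };

        printf("handle=%p\n", register_pp(&init, fake_create));
        return 0;
}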
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
@ -53,13 +74,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
struct ttm_placement placement;
struct ttm_place place;
if (min_offset > max_offset) {
BUG_ON(1);
return -EINVAL;
}
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
@ -73,41 +87,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (max_offset > adev->mc.real_vram_size)
return -EINVAL;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
place.fpfn =
max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
place.lpfn =
min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
}
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
place.fpfn = min_offset >> PAGE_SHIFT;
place.lpfn = max_offset >> PAGE_SHIFT;
place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
@ -116,15 +108,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0;
placement.placement = &place;
placement.num_placement = 1;
placement.busy_placement = &place;
placement.num_busy_placement = 1;
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
NULL, &placement, NULL,
0, &obj);
ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
NULL, NULL, 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@ -155,19 +140,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr)
{
int r;
u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
min_offset, max_offset, mcaddr);
r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
@ -675,6 +655,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
case CHIP_TAHITI:
strcpy(fw_name, "radeon/tahiti_smc.bin");
break;
case CHIP_PITCAIRN:
if ((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6810) ||
(adev->pdev->device == 0x6811))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
} else {
strcpy(fw_name, "radeon/pitcairn_smc.bin");
}
break;
case CHIP_VERDE:
if (((adev->pdev->device == 0x6820) &&
((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83))) ||
((adev->pdev->device == 0x6821) &&
((adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87))) ||
((adev->pdev->revision == 0x87) &&
((adev->pdev->device == 0x6823) ||
(adev->pdev->device == 0x682b)))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/verde_k_smc.bin");
} else {
strcpy(fw_name, "radeon/verde_smc.bin");
}
break;
case CHIP_OLAND:
if (((adev->pdev->revision == 0x81) &&
((adev->pdev->device == 0x6600) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605) ||
(adev->pdev->device == 0x6610))) ||
((adev->pdev->revision == 0x83) &&
(adev->pdev->device == 0x6610))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/oland_k_smc.bin");
} else {
strcpy(fw_name, "radeon/oland_smc.bin");
}
break;
case CHIP_HAINAN:
if (((adev->pdev->revision == 0x81) &&
(adev->pdev->device == 0x6660)) ||
((adev->pdev->revision == 0x83) &&
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)))) {
info->is_kicker = true;
strcpy(fw_name, "radeon/hainan_k_smc.bin");
} else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665)) {
info->is_kicker = true;
strcpy(fw_name, "radeon/banks_k_2_smc.bin");
} else {
strcpy(fw_name, "radeon/hainan_smc.bin");
}
break;
case CHIP_BONAIRE:
if ((adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->device == 0x665f)) {
info->is_kicker = true;
strcpy(fw_name, "radeon/bonaire_k_smc.bin");
} else {
strcpy(fw_name, "radeon/bonaire_smc.bin");
}
break;
case CHIP_HAWAII:
if (adev->pdev->revision == 0x80) {
info->is_kicker = true;
strcpy(fw_name, "radeon/hawaii_k_smc.bin");
} else {
strcpy(fw_name, "radeon/hawaii_smc.bin");
}
break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@ -838,6 +897,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor;
break;
case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
sys_info->value = adev->pdev->devfn;
break;
default:
return -ENODEV;
}
@ -1139,6 +1201,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
.register_pp_handle = amdgpu_cgs_register_pp_handle,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {


@ -231,7 +231,7 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@ -256,7 +256,7 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->edid) {
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
}
kfree(amdgpu_connector->edid);
amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@ -374,7 +372,7 @@ amdgpu_connector_best_single_encoder(struct drm_connector *connector)
/* pick the encoder ids */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@ -1079,7 +1077,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@ -1136,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@ -1155,7 +1153,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}
@ -1296,7 +1294,7 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;
@ -1325,7 +1323,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev,
encoder = drm_encoder_find(connector->dev, NULL,
connector->encoder_ids[i]);
if (!encoder)
continue;


@ -25,6 +25,7 @@
* Jerome Glisse <glisse@freedesktop.org>
*/
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@ -89,12 +90,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_chunk;
}
mutex_lock(&p->ctx->lock);
/* get chunks */
chunk_array_user = u64_to_user_ptr(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
goto put_ctx;
goto free_chunk;
}
p->nchunks = cs->in.num_chunks;
@ -102,7 +105,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
goto put_ctx;
goto free_chunk;
}
for (i = 0; i < p->nchunks; i++) {
@ -169,6 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
if (ret)
goto free_all_kdata;
if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
ret = -ECANCELED;
goto free_all_kdata;
}
if (p->uf_entry.robj)
p->job->uf_addr = uf_offset;
kfree(chunk_array);
@ -182,8 +190,6 @@ free_partial_kdata:
kfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk:
kfree(chunk_array);
@ -473,11 +479,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
size_t size = sizeof(struct page *);
size *= bo->tbo.ttm->num_pages;
memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
amdgpu_ttm_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
false);
if (r)
return r;
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
lobj->user_pages);
binding_userptr = true;
}
@ -502,7 +513,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@ -510,9 +520,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
need_mmap_lock = p->bo_list->first_userptr !=
p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
if (p->bo_list->first_userptr != p->bo_list->num_entries)
p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@ -521,9 +531,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
while (1) {
struct list_head need_pages;
unsigned i;
@ -543,22 +550,24 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
e->robj->tbo.ttm->num_pages);
bo->tbo.ttm->num_pages);
kvfree(e->user_pages);
e->user_pages = NULL;
}
if (e->robj->tbo.ttm->state != tt_bound &&
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
@ -635,9 +644,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
fpriv->vm.last_eviction_counter =
atomic64_read(&p->adev->num_evictions);
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@ -678,9 +684,6 @@ error_validate:
error_free_pages:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@ -705,7 +708,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
list_for_each_entry(e, &p->validated, tv.head) {
struct reservation_object *resv = e->robj->tbo.resv;
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
amdgpu_bo_explicit_sync(e->robj));
if (r)
return r;
@ -726,11 +730,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
if (!error)
ttm_eu_fence_buffer_objects(&parser->ticket,
&parser->validated,
parser->fence);
else if (backoff)
if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@ -740,8 +740,10 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
dma_fence_put(parser->fence);
if (parser->ctx)
if (parser->ctx) {
mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);
@ -766,10 +768,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
if (r)
return r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@ -823,7 +821,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
r = amdgpu_vm_handle_moved(adev, vm);
if (r)
return r;
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
if (r)
return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@ -833,7 +837,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
amdgpu_vm_bo_invalidate(adev, bo);
amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
@ -846,19 +850,63 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_ring *ring = p->job->ring;
int i, r;
int r;
/* Only for UVD/VCE VM emulation */
if (ring->funcs->parse_cs) {
for (i = 0; i < p->job->num_ibs; i++) {
r = amdgpu_ring_parse_cs(ring, p, i);
if (p->job->ring->funcs->parse_cs) {
unsigned i, j;
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
struct drm_amdgpu_cs_chunk_ib *chunk_ib;
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
struct amdgpu_cs_chunk *chunk;
struct amdgpu_ib *ib;
uint64_t offset;
uint8_t *kptr;
chunk = &p->chunks[i];
ib = &p->job->ibs[j];
chunk_ib = chunk->kdata;
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
r = amdgpu_cs_find_mapping(p, chunk_ib->va_start,
&aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
if (r) {
return r;
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
r = amdgpu_ring_parse_cs(ring, p, j);
if (r)
return r;
j++;
}
}
if (p->job->vm) {
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@ -920,54 +968,18 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring = ring;
if (ring->funcs->parse_cs) {
struct amdgpu_bo_va_mapping *m;
struct amdgpu_bo *aobj = NULL;
uint64_t offset;
uint8_t *kptr;
m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
&aobj);
if (!aobj) {
DRM_ERROR("IB va_start is invalid\n");
return -EINVAL;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
(m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
/* the IB should be reserved at this point */
r = amdgpu_bo_kmap(aobj, (void **)&kptr);
if (r) {
return r;
}
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
amdgpu_bo_kunmap(aobj);
} else {
r = amdgpu_ib_get(adev, vm, 0, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
r = amdgpu_ib_get(adev, vm,
ring->funcs->parse_cs ? chunk_ib->ib_bytes : 0,
ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
}
ib->gpu_addr = chunk_ib->va_start;
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
j++;
}
@ -977,7 +989,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
return -EINVAL;
return 0;
return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->job->ring->idx);
}
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
@ -1131,14 +1143,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
unsigned i;
uint64_t seq;
int r;
amdgpu_mn_lock(p->mn);
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
struct amdgpu_bo *bo = p->bo_list->array[i].robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
amdgpu_mn_unlock(p->mn);
return -ERESTARTSYS;
}
}
}
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
return r;
}
@ -1146,21 +1175,36 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
if (r) {
dma_fence_put(p->fence);
dma_fence_put(&job->base.s_fence->finished);
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
return r;
}
amdgpu_cs_post_dependencies(p);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
job->uf_sequence = cs->out.handle;
cs->out.handle = seq;
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring,
amd_sched_get_job_priority(&job->base));
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
return 0;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
@ -1168,8 +1212,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (!adev->accel_working)
return -EBUSY;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
parser.adev = adev;
parser.filp = filp;
@ -1180,6 +1222,10 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
goto out;
}
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
@ -1190,9 +1236,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
reserved_buffers = true;
r = amdgpu_cs_ib_fill(adev, &parser);
if (r)
goto out;
r = amdgpu_cs_dependencies(adev, &parser);
if (r) {
@ -1228,16 +1271,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
@ -1255,6 +1294,8 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
r = PTR_ERR(fence);
else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout);
if (r > 0 && fence->error)
r = fence->error;
dma_fence_put(fence);
} else
r = 1;
@ -1302,6 +1343,62 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
return fence;
}
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_fence_to_handle *info = data;
struct dma_fence *fence;
struct drm_syncobj *syncobj;
struct sync_file *sync_file;
int fd, r;
fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
if (IS_ERR(fence))
return PTR_ERR(fence);
switch (info->in.what) {
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
drm_syncobj_put(syncobj);
return r;
case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
r = drm_syncobj_create(&syncobj, 0, fence);
dma_fence_put(fence);
if (r)
return r;
r = drm_syncobj_get_fd(syncobj, (int*)&info->out.handle);
drm_syncobj_put(syncobj);
return r;
case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
dma_fence_put(fence);
return fd;
}
sync_file = sync_file_create(fence);
dma_fence_put(fence);
if (!sync_file) {
put_unused_fd(fd);
return -ENOMEM;
}
fd_install(fd, sync_file->file);
info->out.handle = fd;
return 0;
default:
return -EINVAL;
}
}
/**
* amdgpu_cs_wait_all_fences - wait on all fences to signal
*
@ -1336,6 +1433,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
if (r == 0)
break;
if (fence->error)
return fence->error;
}
memset(wait, 0, sizeof(*wait));
@ -1381,6 +1481,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
first = i;
goto out;
}
}
@ -1395,7 +1496,7 @@ out:
wait->out.status = (r > 0);
wait->out.first_signaled = first;
/* set return value 0 to indicate success */
r = 0;
r = array[first]->error;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
@ -1416,15 +1517,12 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
@ -1460,78 +1558,36 @@ err_free_fences:
* virtual memory address. Returns 0 with the buffer object and mapping
* filled in, -EINVAL otherwise.
*/
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo)
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **map)
{
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
unsigned i;
if (!parser->bo_list)
return NULL;
int r;
addr /= AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < parser->bo_list->num_entries; i++) {
struct amdgpu_bo_list_entry *lobj;
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
return -EINVAL;
lobj = &parser->bo_list->array[i];
if (!lobj->bo_va)
continue;
*bo = mapping->bo_va->base.bo;
*map = mapping;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
if (mapping->start > addr ||
addr > mapping->last)
continue;
/* Double check that the BO is reserved by this CS */
if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
return -EINVAL;
*bo = lobj->bo_va->base.bo;
return mapping;
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
if (mapping->start > addr ||
addr > mapping->last)
continue;
*bo = lobj->bo_va->base.bo;
return mapping;
}
}
return NULL;
}
/**
* amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
*
* @parser: command submission parser context
*
* Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
*/
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
{
unsigned i;
int r;
if (!parser->bo_list)
return 0;
for (i = 0; i < parser->bo_list->num_entries; i++) {
struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
if (unlikely(r))
return r;
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
continue;
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r))
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
false);
if (r)
return r;
}
return 0;
return amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
}
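amdgpu_cs_find_mapping() now asks the VM for the mapping covering the address instead of scanning the BO list, and additionally verifies the BO is reserved by this CS ticket. A stand-in for the lookup itself, done linearly here where the driver walks an interval tree:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct mapping { uint64_t start, last; const char *name; };

/* Linear analog of amdgpu_vm_bo_lookup_mapping(): find the mapping whose
 * [start, last] page range contains addr (addr already divided by the GPU
 * page size, as in the driver). */
static const struct mapping *find_mapping(const struct mapping *m, size_t n,
                                          uint64_t addr)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (addr >= m[i].start && addr <= m[i].last)
                        return &m[i];
        return NULL;
}

int main(void)
{
        const struct mapping maps[] = { { 0, 15, "ib" }, { 64, 95, "uf" } };
        const struct mapping *hit = find_mapping(maps, 2, 70);

        printf("%s\n", hit ? hit->name : "none");
        return 0;
}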


@ -23,13 +23,41 @@
*/
#include <drm/drmP.h>
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
enum amd_sched_priority priority)
{
/* NORMAL and below are accessible by everyone */
if (priority <= AMD_SCHED_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
return 0;
if (drm_is_current_master(filp))
return 0;
return -EACCES;
}
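The permission ladder above gates elevated context priorities: NORMAL and below are open to everyone, anything higher needs CAP_SYS_NICE or DRM master status. The same shape in a self-contained sketch, with booleans standing in for capable() and drm_is_current_master():

#include <stdbool.h>
#include <stdio.h>

enum prio { PRIO_LOW, PRIO_NORMAL, PRIO_HIGH };

/* Mirrors amdgpu_ctx_priority_permit(); returns 0 if allowed. */
static int priority_permit(enum prio p, bool has_cap_sys_nice,
                           bool is_drm_master)
{
        if (p <= PRIO_NORMAL)
                return 0;
        if (has_cap_sys_nice || is_drm_master)
                return 0;
        return -1;      /* -EACCES in the driver */
}

int main(void)
{
        printf("%d\n", priority_permit(PRIO_HIGH, false, false)); /* denied */
        return 0;
}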
static int amdgpu_ctx_init(struct amdgpu_device *adev,
enum amd_sched_priority priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
unsigned i, j;
int r;
if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
return -EINVAL;
r = amdgpu_ctx_priority_permit(filp, priority);
if (r)
return r;
memset(ctx, 0, sizeof(*ctx));
ctx->adev = adev;
kref_init(&ctx->refcount);
@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
if (!ctx->fences)
return -ENOMEM;
mutex_init(&ctx->lock);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
ctx->rings[i].sequence = 1;
ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
}
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
/* create context entity for each ring */
for (i = 0; i < adev->num_rings; i++) {
struct amdgpu_ring *ring = adev->rings[i];
struct amd_sched_rq *rq;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
rq = &ring->sched.sched_rq[priority];
if (ring == &adev->gfx.kiq.ring)
continue;
@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
&ctx->rings[i].entity);
amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
mutex_destroy(&ctx->lock);
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
enum amd_sched_priority priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
kfree(ctx);
return r;
}
*id = (uint32_t)r;
r = amdgpu_ctx_init(adev, ctx);
r = amdgpu_ctx_init(adev, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
enum amd_sched_priority priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = dev->dev_private;
@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
r = 0;
id = args->in.ctx_id;
priority = amdgpu_to_sched_priority(args->in.priority);
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
if (priority == AMD_SCHED_PRIORITY_INVALID)
priority = AMD_SCHED_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
r = amdgpu_ctx_alloc(adev, fpriv, &id);
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
@ -246,8 +291,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence)
int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
struct dma_fence *fence, uint64_t* handler)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
@ -256,12 +301,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
idx = seq & (amdgpu_sched_jobs - 1);
other = cring->fences[idx];
if (other) {
signed long r;
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
}
if (other)
BUG_ON(!dma_fence_is_signaled(other));
dma_fence_get(fence);
@ -271,8 +312,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock);
dma_fence_put(other);
if (handler)
*handler = seq;
return seq;
return 0;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@ -303,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum amd_sched_priority priority)
{
int i;
struct amdgpu_device *adev = ctx->adev;
struct amd_sched_rq *rq;
struct amd_sched_entity *entity;
struct amdgpu_ring *ring;
enum amd_sched_priority ctx_prio;
ctx->override_priority = priority;
ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < adev->num_rings; i++) {
ring = adev->rings[i];
entity = &ctx->rings[i].entity;
rq = &ring->sched.sched_rq[ctx_prio];
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
continue;
amd_sched_entity_set_rq(entity, rq);
}
}
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
{
struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
struct dma_fence *other = cring->fences[idx];
if (other) {
signed long r;
r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
if (r < 0) {
DRM_ERROR("Error (%ld) waiting for fence!\n", r);
return r;
}
}
return 0;
}
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);


@ -56,6 +56,7 @@
#include "amdgpu_vf_error.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@ -65,6 +66,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
"TAHITI",
@ -107,10 +109,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
{
uint32_t ret;
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
BUG_ON(in_interrupt());
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_rreg(adev, reg);
}
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@ -135,10 +135,8 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
adev->last_mm_index = v;
}
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
BUG_ON(in_interrupt());
if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_virt_kiq_wreg(adev, reg, v);
}
if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@ -402,6 +400,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
/* No doorbell on SI hardware generation */
if (adev->asic_type < CHIP_BONAIRE) {
adev->doorbell.base = 0;
adev->doorbell.size = 0;
adev->doorbell.num_doorbells = 0;
adev->doorbell.ptr = NULL;
return 0;
}
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@ -539,7 +546,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
*wb = offset * 8; /* convert to dw offset */
*wb = offset << 3; /* convert to dw offset */
return 0;
} else {
return -EINVAL;
@ -557,7 +564,7 @@ int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
{
if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used);
__clear_bit(wb >> 3, adev->wb.used);
}
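The pairing above is the point of the change: amdgpu_wb_get() hands out the slot index scaled to a dword offset (each writeback slot spans 8 dwords), so amdgpu_wb_free() must shift the handle back down before clearing the allocation bitmap. The conversion in isolation:

#include <stdint.h>
#include <stdio.h>

/* Slot index <-> dword-offset handle, as in amdgpu_wb_get()/amdgpu_wb_free(). */
static inline uint32_t wb_slot_to_handle(uint32_t slot)   { return slot << 3; }
static inline uint32_t wb_handle_to_slot(uint32_t handle) { return handle >> 3; }

int main(void)
{
        uint32_t h = wb_slot_to_handle(5);

        printf("handle=%u slot=%u\n", h, wb_handle_to_slot(h));
        return 0;
}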
/**
@ -646,6 +653,81 @@ void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
/*
* Firmware Reservation functions
*/
/**
* amdgpu_fw_reserve_vram_fini - free fw reserved vram
*
* @adev: amdgpu_device pointer
*
* free fw reserved vram if it has been reserved.
*/
void amdgpu_fw_reserve_vram_fini(struct amdgpu_device *adev)
{
amdgpu_bo_free_kernel(&adev->fw_vram_usage.reserved_bo,
NULL, &adev->fw_vram_usage.va);
}
/**
* amdgpu_fw_reserve_vram_init - create bo vram reservation from fw
*
* @adev: amdgpu_device pointer
*
* create bo vram reservation from fw.
*/
int amdgpu_fw_reserve_vram_init(struct amdgpu_device *adev)
{
int r = 0;
u64 gpu_addr;
u64 vram_size = adev->mc.visible_vram_size;
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
if (adev->fw_vram_usage.size > 0 &&
adev->fw_vram_usage.size <= vram_size) {
r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
PAGE_SIZE, true, 0,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
&adev->fw_vram_usage.reserved_bo);
if (r)
goto error_create;
r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
if (r)
goto error_reserve;
r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
AMDGPU_GEM_DOMAIN_VRAM,
adev->fw_vram_usage.start_offset,
(adev->fw_vram_usage.start_offset +
adev->fw_vram_usage.size), &gpu_addr);
if (r)
goto error_pin;
r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
&adev->fw_vram_usage.va);
if (r)
goto error_kmap;
amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
}
return r;
error_kmap:
amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
error_pin:
amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
error_reserve:
amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
error_create:
adev->fw_vram_usage.va = NULL;
adev->fw_vram_usage.reserved_bo = NULL;
return r;
}
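The function above is a textbook goto-unwind ladder: each failure label undoes exactly the steps that already succeeded, in reverse order (kmap failure unpins, pin failure unreserves, reserve failure unrefs). The generic shape, with step()/undo() as placeholders:

#include <stdio.h>

static int step(const char *what)  { printf("do %s\n", what); return 0; }
static void undo(const char *what) { printf("undo %s\n", what); }

/* Each label unwinds only what already succeeded, in reverse order. */
static int setup(void)
{
        if (step("create"))
                goto err_create;
        if (step("reserve"))
                goto err_reserve;
        if (step("pin"))
                goto err_pin;
        if (step("kmap"))
                goto err_kmap;
        return 0;

err_kmap:
        undo("pin");
err_pin:
        undo("reserve");
err_reserve:
        undo("create");
err_create:
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}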
/*
* GPU helpers function.
*/
@ -662,27 +744,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev)
{
uint32_t reg;
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* bios scratch used on CIK+ */
if (adev->asic_type >= CHIP_BONAIRE)
return amdgpu_atombios_scratch_need_asic_init(adev);
/* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
return false;
return true;
}
static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
{
if (amdgpu_sriov_vf(adev))
return false;
@ -705,7 +766,23 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
return true;
}
}
return amdgpu_need_post(adev);
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
}
/* bios scratch used on CIK+ */
if (adev->asic_type >= CHIP_BONAIRE)
return amdgpu_atombios_scratch_need_asic_init(adev);
/* check MEM_SIZE for older asics */
reg = amdgpu_asic_get_config_memsize(adev);
if ((reg != 0) && (reg != 0xffffffff))
return false;
return true;
}
/**
@ -887,6 +964,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct atom_context *ctx = adev->mode_info.atom_context;
return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
}
static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
NULL);
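DEVICE_ATTR(vbios_version, 0444, ...) publishes a read-only sysfs file whose show callback snprintf()s the version string parsed from the VBIOS. Once registered it can be read from user space; a small reader, assuming the usual card0 sysfs path (the card index varies per system):

#include <stdio.h>

int main(void)
{
        char buf[64];
        FILE *f = fopen("/sys/class/drm/card0/device/vbios_version", "r");

        if (f && fgets(buf, sizeof(buf), f))
                printf("VBIOS: %s", buf);
        if (f)
                fclose(f);
        return 0;
}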
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@ -906,6 +997,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@ -922,6 +1014,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
int ret;
if (!atom_card_info)
return -ENOMEM;
@ -958,6 +1051,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
}
ret = device_create_file(adev->dev, &dev_attr_vbios_version);
if (ret) {
DRM_ERROR("Failed to create device file for VBIOS version\n");
return ret;
}
return 0;
}
@ -1757,10 +1857,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
if (amdgpu_sriov_vf(adev)) {
amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
}
return 0;
}
@ -1848,6 +1946,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
@ -1933,12 +2032,17 @@ static int amdgpu_resume(struct amdgpu_device *adev)
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
if (adev->is_atom_fw) {
if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
} else {
if (amdgpu_atombios_has_gpu_virtualization_table(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
if (amdgpu_sriov_vf(adev)) {
if (adev->is_atom_fw) {
if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
} else {
if (amdgpu_atombios_has_gpu_virtualization_table(adev))
adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
}
if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
}
}
@ -1979,6 +2083,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_num_rings = 0;
adev->gart.gart_funcs = NULL;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
@ -2007,8 +2112,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
mutex_init(&adev->gfx.pipe_reserve_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
amdgpu_check_arguments(adev);
@ -2051,9 +2158,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
if (adev->asic_type >= CHIP_BONAIRE)
/* doorbell bar mapping */
amdgpu_doorbell_init(adev);
/* doorbell bar mapping */
amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@ -2095,7 +2201,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
goto failed;
}
@ -2103,10 +2209,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
amdgpu_device_detect_sriov_bios(adev);
/* Post card if necessary */
if (amdgpu_vpost_needed(adev)) {
if (amdgpu_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
r = -EINVAL;
goto failed;
}
@ -2114,7 +2219,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_POST_ERROR, 0, 0);
goto failed;
}
} else {
@ -2126,7 +2230,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atomfirmware_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
} else {
@ -2134,7 +2238,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_atombios_get_clock_info(adev);
if (r) {
dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
goto failed;
}
/* init i2c buses */
@ -2145,7 +2249,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_fence_driver_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@ -2155,7 +2259,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
amdgpu_fini(adev);
goto failed;
}
@ -2175,7 +2279,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_ib_pool_init(adev);
if (r) {
dev_err(adev->dev, "IB initialization failed (%d).\n", r);
amdgpu_vf_error_put(AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
goto failed;
}
@ -2183,8 +2287,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
if (amdgpu_sriov_vf(adev))
amdgpu_virt_init_data_exchange(adev);
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
if (r)
DRM_ERROR("registering pm debugfs failed (%d).\n", r);
r = amdgpu_gem_debugfs_init(adev);
if (r)
DRM_ERROR("registering gem debugfs failed (%d).\n", r);
@ -2201,6 +2312,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
r = amdgpu_debugfs_vbios_dump_init(adev);
if (r)
DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@ -2220,7 +2335,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_late_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_late_init failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
goto failed;
}
@ -2252,6 +2367,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_ib_pool_fini(adev);
amdgpu_fw_reserve_vram_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_fbdev_fini(adev);
r = amdgpu_fini(adev);
@ -2276,8 +2392,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
if (adev->asic_type >= CHIP_BONAIRE)
amdgpu_doorbell_fini(adev);
amdgpu_doorbell_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@ -2504,6 +2620,9 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
int i;
bool asic_hang = false;
if (amdgpu_sriov_vf(adev))
return true;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@ -2546,7 +2665,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some blocks need a full reset!\n");
return true;
@ -2654,7 +2774,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter);
adev->gfx.in_reset = true;
adev->in_sriov_reset = true;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@ -2765,7 +2885,7 @@ give_up_reset:
dev_info(adev->dev, "GPU reset succeeded!\n");
}
adev->gfx.in_reset = false;
adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset);
return r;
}
@ -2902,7 +3022,6 @@ out:
}
} else {
dev_err(adev->dev, "asic resume failed (%d).\n", r);
amdgpu_vf_error_put(AMDGIM_ERROR_VF_ASIC_RESUME_FAIL, 0, r);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
if (adev->rings[i] && adev->rings[i]->sched.thread) {
kthread_unpark(adev->rings[i]->sched.thread);
@ -2916,7 +3035,6 @@ out:
if (r) {
/* bad news, how to tell it to userspace ? */
dev_info(adev->dev, "GPU reset failed\n");
amdgpu_vf_error_put(AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
}
else {
dev_info(adev->dev, "GPU reset succeeded!\n");
@ -3463,10 +3581,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
&valuesize);
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else
return -EINVAL;
@ -3754,6 +3869,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
seq_write(m, adev->bios, adev->bios_size);
return 0;
}
static const struct drm_info_list amdgpu_vbios_dump_list[] = {
{"amdgpu_vbios",
amdgpu_debugfs_get_vbios_dump,
0, NULL},
};
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
return amdgpu_debugfs_add_files(adev,
amdgpu_vbios_dump_list, 1);
}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
@ -3763,5 +3900,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
{
return 0;
}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif


@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
}
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];


@ -241,179 +241,125 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
struct amdgpu_dpm_funcs {
int (*get_temperature)(struct amdgpu_device *adev);
int (*pre_set_power_state)(struct amdgpu_device *adev);
int (*set_power_state)(struct amdgpu_device *adev);
void (*post_set_power_state)(struct amdgpu_device *adev);
void (*display_configuration_changed)(struct amdgpu_device *adev);
u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
bool (*vblank_too_short)(struct amdgpu_device *adev);
void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
int (*get_sclk_od)(struct amdgpu_device *adev);
int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*get_mclk_od)(struct amdgpu_device *adev);
int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
int (*check_state_equal)(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
bool *equal);
int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
int *size);
#define amdgpu_dpm_pre_set_power_state(adev) \
((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
int (*reset_power_profile_state)(struct amdgpu_device *adev,
struct amd_pp_profile *request);
int (*get_power_profile_state)(struct amdgpu_device *adev,
struct amd_pp_profile *query);
int (*set_power_profile_state)(struct amdgpu_device *adev,
struct amd_pp_profile *request);
int (*switch_power_profile)(struct amdgpu_device *adev,
enum amd_pp_profile_type type);
};
#define amdgpu_dpm_set_power_state(adev) \
((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_post_set_power_state(adev) \
((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_display_configuration_changed(adev) \
((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
#define amdgpu_dpm_print_power_state(adev, ps) \
((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
#define amdgpu_dpm_vblank_too_short(adev) \
((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
(adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_temperature((adev)))
((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
(adev)->pm.funcs->get_fan_control_mode((adev)))
((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
-EINVAL)
((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_get_sclk(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_sclk((adev), (l)))
((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->get_mclk((adev), (l)))
((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
(adev)->pm.funcs->force_performance_level((adev), (l)))
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_uvd((adev), (g)))
((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
(adev)->pm.funcs->powergate_vce((adev), (g)))
((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_get_current_power_state(adev) \
(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \
(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \
(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \
(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \
(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \
(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \
((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
#define amdgpu_dpm_get_performance_level(adev) \
((adev)->pp_enabled ? \
(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
(adev)->pm.dpm.forced_level)
#define amdgpu_dpm_get_performance_level(adev) \
((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
#define amdgpu_dpm_reset_power_profile_state(adev, request) \
((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_state(adev, query) \
((adev)->powerplay.pp_funcs->get_power_profile_state(\
(adev)->powerplay.pp_handle, query))
#define amdgpu_dpm_set_power_profile_state(adev, request) \
((adev)->powerplay.pp_funcs->set_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_switch_power_profile(adev, type) \
((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type))
#define amdgpu_dpm_set_clockgating_by_smu(adev, msg_id) \
((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\
(adev)->powerplay.pp_handle, msg_id))
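
After this rework every wrapper dereferences the shared powerplay.pp_funcs
table through the opaque pp_handle; the old pm.funcs / pp_enabled split is
gone, so call sites guard on the function pointer itself. A minimal sketch
of the new calling pattern (mine, mirroring the amdgpu_pm.c hunks further
down; not code from this patch):

	int temp = 0;

	/* a table entry may be NULL when the backend lacks the callback */
	if (adev->powerplay.pp_funcs->get_temperature)
		temp = amdgpu_dpm_get_temperature(adev);
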
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@ -485,7 +431,6 @@ struct amdgpu_pm {
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
@ -551,6 +496,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
amdgpu_get_vce_clock_state(void *handle, u32 idx);
#endif


@ -69,9 +69,13 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
* - 3.20.0 - Add support for local BOs
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
* - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
* - 3.23.0 - Add query for VRAM lost counter
*/
#define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 19
#define KMS_DRIVER_MINOR 23
#define KMS_DRIVER_PATCHLEVEL 0
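
Userspace typically gates its use of the new ioctls on this bumped minor
version. An illustrative libdrm-style check (my sketch, not from the patch;
assumes fd is an open amdgpu render node):

	drmVersionPtr ver = drmGetVersion(fd);
	int has_fence_to_handle = ver && ver->version_major == 3 &&
				  ver->version_minor >= 21; /* 3.21 added FENCE_TO_HANDLE */
	drmFreeVersion(ver);
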
int amdgpu_vram_limit = 0;
@ -91,7 +95,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff;
uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
@ -106,14 +110,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
unsigned amdgpu_pcie_gen_cap = 0;
unsigned amdgpu_pcie_lane_cap = 0;
unsigned amdgpu_cg_mask = 0xffffffff;
unsigned amdgpu_pg_mask = 0xffffffff;
unsigned amdgpu_sdma_phase_quantum = 32;
uint amdgpu_pcie_gen_cap = 0;
uint amdgpu_pcie_lane_cap = 0;
uint amdgpu_cg_mask = 0xffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
unsigned amdgpu_pp_feature_mask = 0xffffffff;
uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@ -121,6 +125,7 @@ int amdgpu_cntl_sb_buf_per_se = 0;
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@ -264,6 +269,9 @@ module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);
MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
#ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@ -608,6 +616,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_dev_unref(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
static void
@ -852,6 +862,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,


@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
true, &gobj);
true, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
drm_framebuffer_unregister_private(&rfb->base);
drm_framebuffer_cleanup(&rfb->base);
return 0;
}


@ -168,6 +168,32 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
return 0;
}
/**
* amdgpu_fence_emit_polling - emit a fence on the requested ring
*
* @ring: ring the fence is associated with
* @s: resulting sequence number
*
* Emits a fence command on the requested ring (all asics).
* Used for fence polling.
* Returns 0 on success, -EINVAL on failure.
*/
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
uint32_t seq;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, AMDGPU_FENCE_FLAG_INT);
*s = seq;
return 0;
}
/**
* amdgpu_fence_schedule_fallback - schedule fallback check
*
@ -281,6 +307,30 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
return r;
}
/**
* amdgpu_fence_wait_polling - busy wait for a given sequence number
*
* @ring: ring the fence is associated with
* @wait_seq: sequence number to wait for
* @timeout: timeout for the wait, in usecs
*
* Busy-wait until the given sequence number signals on the requested
* ring (all asics).
* Returns the remaining timeout if the sequence number signaled, or 0 on timeout.
*/
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout)
{
uint32_t seq;
do {
seq = amdgpu_fence_read(ring);
udelay(5);
timeout -= 5;
} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);
return timeout > 0 ? timeout : 0;
}
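
Together the two helpers form a sleep-free emit/wait pair for contexts that
cannot use the interrupt-driven fence path. A hedged sketch of how a caller
might pair them (my example; the 1000 usec budget is an arbitrary assumption):

	uint32_t seq;

	if (amdgpu_fence_emit_polling(ring, &seq))	/* publish a new seq */
		return -EINVAL;
	if (!amdgpu_fence_wait_polling(ring, seq, 1000))
		return -ETIMEDOUT;			/* budget exhausted */
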
/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
@ -641,6 +691,19 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
atomic_read(&ring->fence_drv.last_seq));
seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
continue;
/* set in CP_VMID_PREEMPT and preemption occurred */
seq_printf(m, "Last preempted 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
/* set in CP_VMID_RESET and reset occurred */
seq_printf(m, "Last reset 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
/* Both preemption and reset occurred */
seq_printf(m, "Last both 0x%08x\n",
le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}


@ -332,12 +332,13 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
adev->gart.pages[p] = pagelist[i];
#endif
if (adev->gart.ptr) {
r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
adev->gart.ptr);
if (r)
return r;
}
if (!adev->gart.ptr)
return 0;
r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
adev->gart.ptr);
if (r)
return r;
mb();
amdgpu_gart_flush_gpu_tlb(adev, 0);


@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
int alignment, u32 initial_domain,
u64 flags, bool kernel,
struct drm_gem_object **obj)
int alignment, u32 initial_domain,
u64 flags, bool kernel,
struct reservation_object *resv,
struct drm_gem_object **obj)
{
struct amdgpu_bo *robj;
struct amdgpu_bo *bo;
int r;
*obj = NULL;
@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
flags, NULL, NULL, 0, &robj);
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@ -71,7 +72,7 @@ retry:
}
return r;
}
*obj = &robj->gem_base;
*obj = &bo->gem_base;
return 0;
}
@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
struct mm_struct *mm;
int r;
mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
if (mm && mm != current->mm)
return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
abo->tbo.resv != vm->root.base.bo->tbo.resv)
return -EPERM;
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
/* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */
if (!amdgpu_bo_gpu_accessible(bo))
return -ERESTARTSYS;
if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
return -ERESTARTSYS;
return 0;
}
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct list_head *list)
{
struct ttm_validate_buffer *entry;
list_for_each_entry(entry, list, head) {
struct amdgpu_bo *bo =
container_of(entry->bo, struct amdgpu_bo, tbo);
if (amdgpu_gem_vm_check(NULL, bo))
return false;
}
return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
struct list_head list;
struct list_head list, duplicates;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
if (amdgpu_gem_vm_ready(adev, vm, &list)) {
if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
@ -214,18 +197,24 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
struct reservation_object *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
bool kernel = false;
int r;
/* reject invalid gem flags */
if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED))
if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
return -EINVAL;
/* reject invalid gem domains */
@ -240,7 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
kernel = true;
flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@ -252,10 +241,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
}
size = roundup(size, PAGE_SIZE);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
r = amdgpu_bo_reserve(vm->root.base.bo, false);
if (r)
return r;
resv = vm->root.base.bo->tbo.resv;
}
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
args->in.domain_flags,
kernel, &gobj);
flags, false, resv, &gobj);
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
if (!r) {
struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
abo->parent = amdgpu_bo_ref(vm->root.base.bo);
}
amdgpu_bo_unreserve(vm->root.base.bo);
}
if (r)
return r;
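
From userspace the new per-VM BO path is reached through the same create
ioctl with the new flag set. A hedged libdrm-style sketch (mine, not from
the patch):

	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = 1 << 20;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
	r = drmCommandWriteRead(fd, DRM_AMDGPU_GEM_CREATE, &args, sizeof(args));
	/* on success, args.out.handle names a BO sharing the VM root reservation */
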
@ -297,9 +301,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_CPU, 0,
0, &gobj);
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
0, 0, NULL, &gobj);
if (r)
return r;
@ -317,8 +320,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
down_read(&current->mm->mmap_sem);
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
@ -333,8 +334,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@ -511,10 +510,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
int r = -ERESTARTSYS;
int r;
if (!amdgpu_gem_vm_ready(adev, vm, list))
goto error;
if (!amdgpu_vm_ready(vm))
return;
r = amdgpu_vm_update_directories(adev, vm);
if (r)
@ -551,7 +550,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head list;
struct list_head list, duplicates;
uint64_t va_flags;
int r = 0;
@ -580,13 +579,9 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation);
return -EINVAL;
}
if ((args->operation == AMDGPU_VA_OP_MAP) ||
(args->operation == AMDGPU_VA_OP_REPLACE)) {
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
}
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@ -603,7 +598,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@ -669,6 +664,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
@ -716,6 +712,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
amdgpu_vm_bo_invalidate(adev, robj, true);
amdgpu_bo_unreserve(robj);
break;
default:
@ -745,8 +744,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
ttm_bo_type_device,
&gobj);
false, NULL, &gobj);
if (r)
return -ENOMEM;


@ -109,9 +109,26 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
}
}
static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_compute_multipipe != -1) {
DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
amdgpu_compute_multipipe);
return amdgpu_compute_multipipe == 1;
}
/* FIXME: spreading the queues across pipes causes perf regressions
* on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
return false;
return adev->gfx.mec.num_mec > 1;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
/* policy for amdgpu compute queue ownership */
for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
@ -125,8 +142,7 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
if (mec >= adev->gfx.mec.num_mec)
break;
/* FIXME: spreading the queues across pipes causes perf regressions */
if (0) {
if (multipipe_policy) {
/* policy: amdgpu owns the first two queues of the first MEC */
if (mec == 0 && queue < 2)
set_bit(i, adev->gfx.mec.queue_bitmap);
@ -185,7 +201,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
int r = 0;
mutex_init(&kiq->ring_mutex);
spin_lock_init(&kiq->ring_lock);
r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs);
if (r)
@ -260,8 +276,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
/* Originally the KIQ MQD was put in the GTT domain, but for SR-IOV the
* VRAM domain is a must: otherwise the hypervisor's SAVE_VF trigger fails
* after the driver is unloaded, because the MQD has by then been
* deallocated and unbound from the GART. To avoid diverging between the
* two cases we use the VRAM domain for the KIQ MQD on both SR-IOV and
* bare metal.
*/
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);


@ -169,7 +169,8 @@ static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
int r;
spin_lock(&mgr->lock);
if (atomic64_read(&mgr->available) < mem->num_pages) {
if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
return 0;
}
@ -244,8 +245,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man)
{
struct amdgpu_gtt_mgr *mgr = man->priv;
s64 result = man->size - atomic64_read(&mgr->available);
return (u64)(man->size - atomic64_read(&mgr->available)) * PAGE_SIZE;
return (result > 0 ? result : 0) * PAGE_SIZE;
}
/**
@ -265,7 +267,7 @@ static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
drm_printf(printer, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
man->size, (u64)atomic64_read(&mgr->available),
amdgpu_gtt_mgr_usage(man) >> 20);
}


@ -169,6 +169,12 @@ restart_ih:
while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2;
/* Prescreening of high-frequency interrupts */
if (!amdgpu_ih_prescreen_iv(adev)) {
adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
continue;
}
/* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]);
@ -190,3 +196,79 @@ restart_ih:
return IRQ_HANDLED;
}
/**
* amdgpu_ih_add_fault - Add a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a retry page fault interrupt is
* received. If this is a new page fault, it will be added to a hash
* table. The return value indicates whether this is a new fault, or
* a fault that was already known and is already being handled.
*
* If there are too many pending page faults, this will fail. Retry
* interrupts should be ignored in this case until there is enough
* free space.
*
* Returns 0 if the fault was added, 1 if the fault was already known,
* -ENOSPC if there are too many pending faults.
*/
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r = -ENOSPC;
if (WARN_ON_ONCE(!adev->irq.ih.faults))
/* Should be allocated in <IP>_ih_sw_init on GPUs that
* support retry faults and require retry filtering.
*/
return r;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
/* Only let the hash table fill up to 50% for best performance */
if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
goto unlock_out;
r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
if (!r)
adev->irq.ih.faults->count++;
/* chash_table_copy_in should never fail unless we're losing count */
WARN_ON_ONCE(r < 0);
unlock_out:
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
return r;
}
/**
* amdgpu_ih_clear_fault - Remove a page fault record
*
* @adev: amdgpu device pointer
* @key: 64-bit encoding of PASID and address
*
* This should be called when a page fault has been handled. Any
* future interrupt with this key will be processed as a new
* page fault.
*/
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
{
unsigned long flags;
int r;
if (!adev->irq.ih.faults)
return;
spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
if (!WARN_ON_ONCE(r < 0)) {
adev->irq.ih.faults->count--;
WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
}
spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
}
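
A hedged sketch of how an IH prescreen step can use this pair: fold the
PASID and page address into the 64-bit key, drop retry interrupts that are
already pending, and clear the entry once the fault is resolved. The key
encoding below is an illustrative assumption, not necessarily what the
IP-specific code uses:

	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT); /* assumed encoding */
	int r = amdgpu_ih_add_fault(adev, key);

	if (r == 1 || r == -ENOSPC)
		return false;	/* duplicate or table full: filter this retry IV */
	/* ... handle the fault, then re-arm future interrupts for this page: */
	amdgpu_ih_clear_fault(adev, key);
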


@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
#include <linux/chash.h>
struct amdgpu_device;
/*
* vega10+ IH clients
@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0
#define AMDGPU_PAGEFAULT_HASH_BITS 8
struct amdgpu_retryfault_hashtable {
DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
spinlock_t lock;
int count;
};
/*
* R6xx+ IH ring
*/
@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell;
bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
struct amdgpu_retryfault_hashtable *faults;
};
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev);
int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif


@ -65,6 +65,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
amdgpu_sync_create(&(*job)->sync);
amdgpu_sync_create(&(*job)->dep_sync);
amdgpu_sync_create(&(*job)->sched_sync);
(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
return 0;
}
@ -103,6 +104,7 @@ static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
{
struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
amdgpu_ring_priority_put(job->ring, amd_sched_get_job_priority(s_job));
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->dep_sync);
@ -139,6 +141,8 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->fence_ctx = entity->fence_context;
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
amdgpu_ring_priority_get(job->ring,
amd_sched_get_job_priority(&job->base));
amd_sched_entity_push_job(&job->base);
return 0;
@ -177,8 +181,8 @@ static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
{
struct dma_fence *fence = NULL;
struct amdgpu_device *adev;
struct amdgpu_job *job;
struct amdgpu_fpriv *fpriv = NULL;
int r;
if (!sched_job) {
@ -186,23 +190,25 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
return NULL;
}
job = to_amdgpu_job(sched_job);
adev = job->adev;
BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
trace_amdgpu_sched_run_job(job);
if (job->vm)
fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
/* skip ib schedule when vram is lost */
if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter)) {
dma_fence_set_error(&job->base.s_fence->finished, -ECANCELED);
DRM_ERROR("Skip scheduling IBs!\n");
else {
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
} else {
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
&fence);
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if gpu reset, hw fence will be replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
amdgpu_job_free_resources(job);
return fence;
}


@ -28,6 +28,7 @@
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@ -269,7 +270,6 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
@ -282,8 +282,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (!info->return_size || !info->return_pointer)
return -EINVAL;
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
@ -765,6 +763,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
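
On the userspace side the counter is read through the INFO ioctl like any
other query; an illustrative sketch (mine, not from the patch):

	uint32_t vram_lost = 0;
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)&vram_lost;
	request.return_size = sizeof(vram_lost);
	request.query = AMDGPU_INFO_VRAM_LOST_COUNTER;
	drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
	/* compare against a cached value to detect an intervening GPU reset */
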
@ -791,12 +792,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv)
{
return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
}
/**
* amdgpu_driver_open_kms - drm callback for open
*
@ -825,7 +820,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
r = amdgpu_vm_init(adev, &fpriv->vm,
AMDGPU_VM_CONTEXT_GFX);
AMDGPU_VM_CONTEXT_GFX, 0);
if (r) {
kfree(fpriv);
goto out_suspend;
@ -841,8 +836,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r)
if (r) {
amdgpu_vm_fini(adev, &fpriv->vm);
kfree(fpriv);
goto out_suspend;
}
}
mutex_init(&fpriv->bo_list_lock);
@ -850,7 +848,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv;
out_suspend:
@ -1020,7 +1017,9 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
/* KMS */
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),


@ -50,8 +50,10 @@ struct amdgpu_mn {
struct hlist_node node;
/* objects protected by lock */
struct mutex lock;
struct rw_semaphore lock;
struct rb_root_cached objects;
struct mutex read_lock;
atomic_t recursion;
};
struct amdgpu_mn_node {
@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
mutex_lock(&rmn->lock);
down_write(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) {
@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
mutex_unlock(&rmn->lock);
up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work);
}
/**
* amdgpu_mn_lock - take the write side lock for this mn
*/
void amdgpu_mn_lock(struct amdgpu_mn *mn)
{
if (mn)
down_write(&mn->lock);
}
/**
* amdgpu_mn_unlock - drop the write side lock for this mn
*/
void amdgpu_mn_unlock(struct amdgpu_mn *mn)
{
if (mn)
up_write(&mn->lock);
}
/**
* amdgpu_mn_read_lock - take the rmn read lock
*
* @rmn: our notifier
*
* Take the rmn read side lock.
*/
static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
{
mutex_lock(&rmn->read_lock);
if (atomic_inc_return(&rmn->recursion) == 1)
down_read_non_owner(&rmn->lock);
mutex_unlock(&rmn->read_lock);
}
/**
* amdgpu_mn_read_unlock - drop the rmn read lock
*
* @rmn: our notifier
*
* Drop the rmn read side lock.
*/
static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
{
if (atomic_dec_return(&rmn->recursion) == 0)
up_read_non_owner(&rmn->lock);
}
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
r = amdgpu_bo_reserve(bo, true);
if (r) {
DRM_ERROR("(%ld) failed to reserve user bo\n", r);
continue;
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
amdgpu_bo_unreserve(bo);
amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
}
}
@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
mutex_lock(&rmn->lock);
amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end);
}
}
mutex_unlock(&rmn->lock);
/**
* amdgpu_mn_invalidate_range_end - callback to notify about mm change
*
* @mn: our notifier
* @mm: the mm this callback is about
* @start: start of updated range
* @end: end of updated range
*
* Release the lock again to allow new command submissions.
*/
static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
amdgpu_mn_read_unlock(rmn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
.invalidate_range_end = amdgpu_mn_invalidate_range_end,
};
/**
@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
*
* Creates a notifier context for current->mm.
*/
static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn;
@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
mutex_init(&rmn->lock);
init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED;
mutex_init(&rmn->read_lock);
atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
mutex_lock(&rmn->lock);
down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
mutex_unlock(&rmn->lock);
up_write(&rmn->lock);
return -ENOMEM;
}
}
@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
mutex_unlock(&rmn->lock);
up_write(&rmn->lock);
return 0;
}
@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
mutex_lock(&rmn->lock);
down_write(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
mutex_unlock(&rmn->lock);
up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}


@ -0,0 +1,52 @@
/*
* Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
*/
#ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__
/*
* MMU Notifier
*/
struct amdgpu_mn;
#if defined(CONFIG_MMU_NOTIFIER)
void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
return NULL;
}
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif
#endif


@ -40,9 +40,7 @@
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo;
bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
amdgpu_bo_kunmap(bo);
@ -64,11 +62,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false;
}
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
struct ttm_placement *placement,
struct ttm_place *places,
u32 domain, u64 flags)
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
struct ttm_place *places = abo->placements;
u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@ -151,27 +150,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places;
}
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
domain, abo->flags);
}
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
struct ttm_placement *placement)
{
BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
memcpy(bo->placements, placement->placement,
placement->num_placement * sizeof(struct ttm_place));
bo->placement.num_placement = placement->num_placement;
bo->placement.num_busy_placement = placement->num_busy_placement;
bo->placement.placement = bo->placements;
bo->placement.busy_placement = bo->placements;
}
/**
* amdgpu_bo_create_reserved - create reserved BO for kernel use
*
@ -303,14 +281,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
static int amdgpu_bo_do_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
@ -384,13 +361,17 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif
amdgpu_fill_placement_to_bo(bo, placement);
/* Kernel allocation are uninterruptible */
bo->tbo.bdev = &adev->mman.bdev;
amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
/* Kernel allocation are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0))
return r;
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
@ -400,9 +381,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
else
amdgpu_cs_report_moved_bytes(adev, bytes_moved, 0);
if (unlikely(r != 0))
return r;
if (kernel)
bo->tbo.priority = 1;
@ -442,27 +420,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r;
if (bo->shadow)
return 0;
memset(&placements, 0, sizeof(placements));
amdgpu_ttm_placement_init(adev, &placement, placements,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW);
r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
NULL, &placement,
bo->tbo.resv,
0,
&bo->shadow);
r = amdgpu_bo_do_create(adev, size, byte_align, true,
AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW,
NULL, bo->tbo.resv, 0,
&bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@ -484,18 +452,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
memset(&placements, 0, sizeof(placements));
amdgpu_ttm_placement_init(adev, &placement, placements,
domain, parent_flags);
r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
parent_flags, sg, &placement, resv,
init_value, bo_ptr);
r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
parent_flags, sg, resv, init_value, bo_ptr);
if (r)
return r;
@ -672,7 +633,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
@ -704,22 +664,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
(!max_offset || max_offset >
adev->mc.visible_vram_size)) {
if (WARN_ON_ONCE(min_offset >
adev->mc.visible_vram_size))
return -EINVAL;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
} else {
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
}
unsigned fpfn, lpfn;
fpfn = min_offset >> PAGE_SHIFT;
lpfn = max_offset >> PAGE_SHIFT;
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn ||
@ -928,8 +882,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
amdgpu_vm_bo_invalidate(adev, abo);
abo = ttm_to_amdgpu_bo(bo);
amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
@ -955,7 +909,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
return 0;
abo = container_of(bo, struct amdgpu_bo, tbo);
abo = ttm_to_amdgpu_bo(bo);
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;


@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
struct amdgpu_bo_va *bo_va;
struct list_head list;
struct rb_node rb;
uint64_t start;
@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */
struct dma_fence *last_pt_update;
unsigned ref_count;
/* all other members protected by the VM PD being reserved */
struct dma_fence *last_pt_update;
/* mappings for this bo_va */
struct list_head invalids;
struct list_head valids;
/* If the mappings are cleared or filled */
bool cleared;
};
struct amdgpu_bo {
@ -88,6 +94,11 @@ struct amdgpu_bo {
};
};
static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
return container_of(tbo, struct amdgpu_bo, tbo);
}
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
@ -182,6 +193,14 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
}
}
/**
* amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
@ -189,14 +208,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,


@ -64,17 +64,13 @@ static const struct cg_flag_name clocks[] = {
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
if (adev->pp_enabled)
/* TODO */
return;
if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0)
adev->pm.dpm.ac_power = true;
else
adev->pm.dpm.ac_power = false;
if (adev->pm.funcs->enable_bapm)
if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
@ -88,9 +84,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
if (adev->pp_enabled) {
if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
} else
else
pm = adev->pm.dpm.user_state;
return snprintf(buf, PAGE_SIZE, "%s\n",
@ -118,8 +114,8 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
@ -140,13 +136,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level;
enum amd_dpm_forced_level level = 0xff;
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
level = amdgpu_dpm_get_performance_level(adev);
if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@ -167,7 +167,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level;
enum amd_dpm_forced_level current_level;
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
/* Can't force performance level when the card is off */
@ -175,7 +175,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
current_level = amdgpu_dpm_get_performance_level(adev);
if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@ -203,9 +204,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
if (adev->pp_enabled)
amdgpu_dpm_force_performance_level(adev, level);
else {
if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@ -233,7 +232,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@ -257,8 +256,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0;
if (adev->pp_enabled) {
if (adev->powerplay.pp_funcs->get_current_power_state
&& adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
@ -280,25 +279,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
enum amd_pm_state_type pm = 0;
int i;
if (adev->pp_force_state_enabled && adev->pp_enabled) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
}
if (i == data.nums)
i = -EINVAL;
return snprintf(buf, PAGE_SIZE, "%d\n", i);
} else
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
}
@ -315,7 +299,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
else if (adev->pp_enabled) {
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
ret = kstrtoul(buf, 0, &idx);
@ -330,7 +315,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
adev->pp_force_state_enabled = true;
}
}
@ -347,7 +332,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@ -368,7 +353,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@ -380,14 +365,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size = 0;
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else if (adev->pm.funcs->print_clock_levels)
size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
return size;
if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@ -416,10 +398,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
mask |= 1 << level;
}
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
else if (adev->pm.funcs->force_clock_level)
adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
fail:
return count;
}
@ -430,14 +411,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size = 0;
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else if (adev->pm.funcs->print_clock_levels)
size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
return size;
if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@ -465,11 +443,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
}
mask |= 1 << level;
}
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
else if (adev->pm.funcs->force_clock_level)
adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
fail:
return count;
}
@ -480,14 +456,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size = 0;
if (adev->pp_enabled)
size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else if (adev->pm.funcs->print_clock_levels)
size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
return size;
if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@ -515,11 +488,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
}
mask |= 1 << level;
}
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
else if (adev->pm.funcs->force_clock_level)
adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
fail:
return count;
}
@ -532,10 +503,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
else if (adev->pm.funcs->get_sclk_od)
value = adev->pm.funcs->get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@ -556,12 +525,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
if (adev->pp_enabled) {
if (adev->powerplay.pp_funcs->set_sclk_od)
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
} else if (adev->pm.funcs->set_sclk_od) {
adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@ -578,10 +547,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
else if (adev->pm.funcs->get_mclk_od)
value = adev->pm.funcs->get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@ -602,12 +569,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
if (adev->pp_enabled) {
if (adev->powerplay.pp_funcs->set_mclk_od)
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
} else if (adev->pm.funcs->set_mclk_od) {
adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
} else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@ -621,14 +588,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
int ret = 0xff;
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->get_power_profile_state)
ret = amdgpu_dpm_get_power_profile_state(
adev, query);
else if (adev->pm.funcs->get_power_profile_state)
ret = adev->pm.funcs->get_power_profile_state(
adev, query);
if (ret)
return ret;
@ -675,15 +639,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
char *sub_str, buf_cpy[128], *tmp_str;
const char delimiter[3] = {' ', '\n', '\0'};
long int value;
int ret = 0;
int ret = 0xff;
if (strncmp("reset", buf, strlen("reset")) == 0) {
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->reset_power_profile_state)
ret = amdgpu_dpm_reset_power_profile_state(
adev, request);
else if (adev->pm.funcs->reset_power_profile_state)
ret = adev->pm.funcs->reset_power_profile_state(
adev, request);
if (ret) {
count = -EINVAL;
goto fail;
@ -692,12 +653,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
}
if (strncmp("set", buf, strlen("set")) == 0) {
if (adev->pp_enabled)
if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
else if (adev->pm.funcs->set_power_profile_state)
ret = adev->pm.funcs->set_power_profile_state(
adev, request);
if (ret) {
count = -EINVAL;
goto fail;
@ -745,13 +704,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
loop++;
}
if (adev->pp_enabled)
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
else if (adev->pm.funcs->set_power_profile_state)
ret = adev->pm.funcs->set_power_profile_state(
adev, request);
if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state(adev, request);
if (ret)
count = -EINVAL;
@ -831,7 +785,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
if (!adev->powerplay.pp_funcs->get_temperature)
temp = 0;
else
temp = amdgpu_dpm_get_temperature(adev);
@ -862,7 +816,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
if (!adev->powerplay.pp_funcs->get_fan_control_mode)
return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@ -879,7 +833,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@ -919,9 +873,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (err)
return err;
if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (err)
return err;
}
return count;
}
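
The pwm1 store path above accepts the usual hwmon 0-255 value and rescales it to the percentage the powerplay fan hook expects; the show path below performs the inverse. A standalone model of that round trip for illustration (integer division exactly as in the driver, so a store/load round trip may lose precision):

#include <stdio.h>

/* Model of the pwm1 <-> percent scaling used by the hwmon handlers. */
int main(void)
{
	unsigned pwm = 200;                /* value written to pwm1 */
	unsigned pct = (pwm * 100) / 255;  /* 78, passed to set_fan_speed_percent */
	unsigned back = (pct * 255) / 100; /* 198, as get_pwm1 would report */

	printf("pwm %u -> %u%% -> pwm %u\n", pwm, pct, back);
	return 0;
}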
@ -932,11 +888,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed;
u32 speed = 0;
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
return err;
if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
return err;
}
speed = (speed * 255) / 100;
@ -949,11 +907,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 speed;
u32 speed = 0;
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
}
return sprintf(buf, "%i\n", speed);
}
@ -996,9 +956,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
if (adev->pp_enabled)
return effective_mode;
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@ -1008,21 +965,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
if ((!adev->pm.funcs->get_fan_speed_percent &&
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
(!adev->pm.funcs->get_fan_control_mode &&
(!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
if ((!adev->pm.funcs->set_fan_speed_percent &&
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
(!adev->pm.funcs->set_fan_control_mode &&
(!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
/* hide max/min values if we can't both query and manage the fan */
if ((!adev->pm.funcs->set_fan_speed_percent &&
!adev->pm.funcs->get_fan_speed_percent) &&
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->powerplay.pp_funcs->get_fan_speed_percent) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
@ -1055,7 +1012,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
if (adev->pm.funcs->get_temperature) {
if (adev->powerplay.pp_funcs->get_temperature) {
int temp = amdgpu_dpm_get_temperature(adev);
if (temp < adev->pm.dpm.thermal.min_temp)
@ -1087,7 +1044,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
true : false;
/* check if the vblank period is too short to adjust the mclk */
if (single_display && adev->pm.funcs->vblank_too_short) {
if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev))
single_display = false;
}
@ -1216,7 +1173,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
bool equal;
bool equal = false;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@ -1236,7 +1193,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
if (amdgpu_dpm == 1) {
if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
printk("switching to power state:\n");
@ -1245,15 +1202,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
amdgpu_dpm_display_configuration_changed(adev);
if (adev->powerplay.pp_funcs->display_configuration_changed)
amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
equal = false;
if (adev->powerplay.pp_funcs->check_state_equal) {
if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
equal = false;
}
if (equal)
return;
@ -1264,7 +1223,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
if (adev->pm.funcs->force_performance_level) {
if (adev->powerplay.pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
@ -1280,7 +1239,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
if (adev->powerplay.pp_funcs->powergate_uvd) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
@ -1302,7 +1261,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
if (adev->powerplay.pp_funcs->powergate_vce) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
@ -1337,8 +1296,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
int i;
if (adev->pp_enabled)
/* TO DO */
if (adev->powerplay.pp_funcs->print_power_state == NULL)
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
@ -1353,10 +1311,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
if (!adev->pp_enabled) {
if (adev->pm.funcs->get_temperature == NULL)
return 0;
}
if (adev->pm.dpm_enabled == 0)
return 0;
if (adev->powerplay.pp_funcs->get_temperature == NULL)
return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
@ -1379,27 +1338,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
if (adev->pp_enabled) {
ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
if (ret) {
DRM_ERROR("failed to create device file pp_num_states\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
if (ret) {
DRM_ERROR("failed to create device file pp_cur_state\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
if (ret) {
DRM_ERROR("failed to create device file pp_force_state\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_table);
if (ret) {
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
if (ret) {
DRM_ERROR("failed to create device file pp_num_states\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
if (ret) {
DRM_ERROR("failed to create device file pp_cur_state\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
if (ret) {
DRM_ERROR("failed to create device file pp_force_state\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_table);
if (ret) {
DRM_ERROR("failed to create device file pp_table\n");
return ret;
}
ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
@ -1455,16 +1413,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
if (adev->pm.dpm_enabled == 0)
return;
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
device_remove_file(adev->dev, &dev_attr_power_dpm_state);
device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
if (adev->pp_enabled) {
device_remove_file(adev->dev, &dev_attr_pp_num_states);
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
}
device_remove_file(adev->dev, &dev_attr_pp_num_states);
device_remove_file(adev->dev, &dev_attr_pp_cur_state);
device_remove_file(adev->dev, &dev_attr_pp_force_state);
device_remove_file(adev->dev, &dev_attr_pp_table);
device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
@ -1495,8 +1456,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
if (adev->pp_enabled) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
@ -1630,15 +1591,15 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
} else if (adev->pp_enabled) {
return amdgpu_debugfs_pm_info_pp(m, adev);
} else {
} else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.funcs->debugfs_print_current_performance_level)
adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
} else {
return amdgpu_debugfs_pm_info_pp(m, adev);
}
return 0;


@ -34,24 +34,6 @@
#include "cik_dpm.h"
#include "vi_dpm.h"
static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
{
struct amd_pp_init pp_init;
struct amd_powerplay *amd_pp;
int ret;
amd_pp = &(adev->powerplay);
pp_init.chip_family = adev->family;
pp_init.chip_id = adev->asic_type;
pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
pp_init.feature_mask = amdgpu_pp_feature_mask;
pp_init.device = amdgpu_cgs_create_device(adev);
ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
if (ret)
return -EINVAL;
return 0;
}
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@ -59,7 +41,6 @@ static int amdgpu_pp_early_init(void *handle)
int ret = 0;
amd_pp = &(adev->powerplay);
adev->pp_enabled = false;
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
@ -73,9 +54,7 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_STONEY:
case CHIP_VEGA10:
case CHIP_RAVEN:
adev->pp_enabled = true;
if (amdgpu_create_pp_handle(adev))
return -EINVAL;
amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
break;
@ -87,17 +66,26 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND:
case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs;
amd_pp->pp_funcs = &si_dpm_funcs;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
if (amdgpu_dpm == -1) {
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
amd_pp->pp_funcs = &ci_dpm_funcs;
} else {
amd_pp->cgs_device = amdgpu_cgs_create_device(adev);
amd_pp->ip_funcs = &pp_ip_funcs;
amd_pp->pp_funcs = &pp_dpm_funcs;
}
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
amd_pp->pp_funcs = &kv_dpm_funcs;
break;
#endif
default:
@ -107,12 +95,9 @@ static int amdgpu_pp_early_init(void *handle)
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
adev->powerplay.pp_handle);
amd_pp->cgs_device ? amd_pp->cgs_device :
amd_pp->pp_handle);
if (ret == PP_DPM_DISABLED) {
adev->pm.dpm_enabled = false;
return 0;
}
return ret;
}
@ -126,11 +111,6 @@ static int amdgpu_pp_late_init(void *handle)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
}
return ret;
}
@ -165,21 +145,13 @@ static int amdgpu_pp_hw_init(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
if (ret == PP_DPM_DISABLED) {
adev->pm.dpm_enabled = false;
return 0;
}
if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
adev->pm.dpm_enabled = true;
return ret;
}
@ -188,14 +160,11 @@ static int amdgpu_pp_hw_fini(void *handle)
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled && adev->pm.dpm_enabled)
amdgpu_pm_sysfs_fini(adev);
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
amdgpu_ucode_fini_bo(adev);
return ret;
@ -209,9 +178,8 @@ static void amdgpu_pp_late_fini(void *handle)
adev->powerplay.ip_funcs->late_fini(
adev->powerplay.pp_handle);
if (adev->pp_enabled)
amd_powerplay_destroy(adev->powerplay.pp_handle);
if (adev->powerplay.cgs_device)
amdgpu_cgs_destroy_device(adev->powerplay.cgs_device);
}
static int amdgpu_pp_suspend(void *handle)


@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
unsigned asize = amdgpu_bo_size(bo);
int ret;
if (!vma->vm_file)
return -ENODEV;
if (adev == NULL)
return -ENODEV;
/* Check for valid size. */
if (asize < vma->vm_end - vma->vm_start)
return -EINVAL;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
return -EPERM;
}
vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
/* prime mmap does not need to check access, so allow here */
ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
if (ret)
return ret;
ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
return ret;
}
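
The new amdgpu_gem_prime_mmap() above backs CPU mappings of exported dma-bufs. A hedged userspace sketch of how such a mapping would be obtained, assuming an already-created GEM handle on an open render node and libdrm's drmPrimeHandleToFD(); userptr BOs and buffers created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS are rejected with -EPERM, per the check above:

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>
#include <xf86drm.h>  /* libdrm */

/* Hypothetical helper: export `handle` as a dma-buf and map it.
 * `drm_fd`, `handle` and `size` are assumed to exist already. */
static void *map_bo(int drm_fd, uint32_t handle, size_t size)
{
	int dmabuf_fd;
	void *p;

	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return NULL;

	/* This mmap is what ends up in amdgpu_gem_prime_mmap(). */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		 dmabuf_fd, 0);
	return p == MAP_FAILED ? NULL : p;
}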
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags);


@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
psp->mode1_reset = psp_v3_1_mode1_reset;
break;
case CHIP_RAVEN:
#if 0
psp->init_microcode = psp_v10_0_init_microcode;
#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
psp->mode1_reset = psp_v10_0_mode1_reset;
break;
default:
return -EINVAL;
@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
release_firmware(adev->psp.sos_fw);
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
return 0;
}
@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
int ret;
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
return ret;
if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
ret = psp_bootloader_load_sysdrv(psp);
if (ret)
return ret;
ret = psp_bootloader_load_sos(psp);
if (ret)
return ret;
ret = psp_bootloader_load_sos(psp);
if (ret)
return ret;
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
return ret;
}
return 0;
}
@ -487,6 +508,22 @@ failed:
return ret;
}
static bool psp_check_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
return true;
return false;
}
static int psp_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp);
}
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
.check_soft_reset = psp_check_reset,
.wait_for_idle = NULL,
.soft_reset = NULL,
.soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};


@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_stop)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs;


@ -121,7 +121,7 @@ static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
int user_ring,
int user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
@ -139,7 +139,7 @@ static int amdgpu_lru_map(struct amdgpu_device *adev,
}
r = amdgpu_ring_lru_get(adev, ring_type, ring_blacklist,
j, out_ring);
j, lru_pipe_order, out_ring);
if (r)
return r;
@ -284,8 +284,10 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
r = amdgpu_identity_map(adev, mapper, ring, out_ring);
break;
case AMDGPU_HW_IP_DMA:
r = amdgpu_lru_map(adev, mapper, ring, false, out_ring);
break;
case AMDGPU_HW_IP_COMPUTE:
r = amdgpu_lru_map(adev, mapper, ring, out_ring);
r = amdgpu_lru_map(adev, mapper, ring, true, out_ring);
break;
default:
*out_ring = NULL;


@ -136,7 +136,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
if (ring->funcs->end_use)
ring->funcs->end_use(ring);
amdgpu_ring_lru_touch(ring->adev, ring);
if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)
amdgpu_ring_lru_touch(ring->adev, ring);
}
/**
@ -154,6 +155,75 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
ring->funcs->end_use(ring);
}
/**
* amdgpu_ring_priority_put - restore a ring's priority
*
* @ring: amdgpu_ring structure holding the information
* @priority: target priority
*
* Release a request for executing at @priority
*/
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
int i;
if (!ring->funcs->set_priority)
return;
if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
return;
/* no need to restore if the job is already at the lowest priority */
if (priority == AMD_SCHED_PRIORITY_NORMAL)
return;
mutex_lock(&ring->priority_mutex);
/* something higher prio is executing, no need to decay */
if (ring->priority > priority)
goto out_unlock;
/* decay priority to the next level with a job available */
for (i = priority; i >= AMD_SCHED_PRIORITY_MIN; i--) {
if (i == AMD_SCHED_PRIORITY_NORMAL
|| atomic_read(&ring->num_jobs[i])) {
ring->priority = i;
ring->funcs->set_priority(ring, i);
break;
}
}
out_unlock:
mutex_unlock(&ring->priority_mutex);
}
/**
* amdgpu_ring_priority_get - change the ring's priority
*
* @ring: amdgpu_ring structure holding the information
* @priority: target priority
*
* Request a ring's priority to be raised to @priority (refcounted).
*/
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
if (!ring->funcs->set_priority)
return;
atomic_inc(&ring->num_jobs[priority]);
mutex_lock(&ring->priority_mutex);
if (priority <= ring->priority)
goto out_unlock;
ring->priority = priority;
ring->funcs->set_priority(ring, priority);
out_unlock:
mutex_unlock(&ring->priority_mutex);
}
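
The get/put pair above reference-counts requests per priority level: get() raises the ring immediately, while put() only decays once the last job at that level completes, and then only down to the highest level that still has pending jobs. A standalone, single-threaded model of the decay (three levels assumed; the real code's mutex and atomics are elided):

#include <stdio.h>

enum { PRIO_LOW, PRIO_NORMAL, PRIO_HIGH, PRIO_MAX };

static int num_jobs[PRIO_MAX];
static int ring_priority = PRIO_NORMAL;

static void priority_get(int prio)
{
	num_jobs[prio]++;
	if (prio > ring_priority)
		ring_priority = prio;
}

static void priority_put(int prio)
{
	int i;

	if (--num_jobs[prio] > 0 || prio == PRIO_NORMAL)
		return;
	if (ring_priority > prio)	/* something higher still runs */
		return;
	for (i = prio; i >= PRIO_LOW; i--) {	/* decay */
		if (i == PRIO_NORMAL || num_jobs[i]) {
			ring_priority = i;
			break;
		}
	}
}

int main(void)
{
	priority_get(PRIO_HIGH);
	priority_get(PRIO_HIGH);
	priority_put(PRIO_HIGH);		/* one high job left */
	printf("%d\n", ring_priority);		/* 2: stays high */
	priority_put(PRIO_HIGH);		/* last one: decay */
	printf("%d\n", ring_priority);		/* 1: back to normal */
	return 0;
}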
/**
* amdgpu_ring_init - init driver ring struct.
*
@ -169,7 +239,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned max_dw, struct amdgpu_irq_src *irq_src,
unsigned irq_type)
{
int r;
int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
/* Set the hw submission limit higher for KIQ because
@ -247,9 +317,14 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
}
ring->max_dw = max_dw;
ring->priority = AMD_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
INIT_LIST_HEAD(&ring->lru_list);
amdgpu_ring_lru_touch(adev, ring);
for (i = 0; i < AMD_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
if (amdgpu_debugfs_ring_init(adev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
@ -315,14 +390,16 @@ static bool amdgpu_ring_is_blacklisted(struct amdgpu_ring *ring,
* @type: amdgpu_ring_type enum
* @blacklist: blacklisted ring ids array
* @num_blacklist: number of entries in @blacklist
* @lru_pipe_order: find a ring from the least recently used pipe
* @ring: output ring
*
* Retrieve the amdgpu_ring structure for the least recently used ring of
* a specific IP block (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
int num_blacklist, struct amdgpu_ring **ring)
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring)
{
struct amdgpu_ring *entry;
@ -337,10 +414,23 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
if (amdgpu_ring_is_blacklisted(entry, blacklist, num_blacklist))
continue;
*ring = entry;
amdgpu_ring_lru_touch_locked(adev, *ring);
break;
if (!*ring) {
*ring = entry;
/* We are done for ring LRU */
if (!lru_pipe_order)
break;
}
/* Move all rings on the same pipe to the end of the list */
if (entry->pipe == (*ring)->pipe)
amdgpu_ring_lru_touch_locked(adev, entry);
}
/* Move the ring we found to the end of the list */
if (*ring)
amdgpu_ring_lru_touch_locked(adev, *ring);
spin_unlock(&adev->ring_lru_list_lock);
if (!*ring) {


@ -24,6 +24,7 @@
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
#include <drm/amdgpu_drm.h>
#include "gpu_scheduler.h"
/* max number of rings */
@ -56,6 +57,7 @@ struct amdgpu_device;
struct amdgpu_ring;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
/*
* Fences.
@ -88,8 +90,12 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
uint32_t wait_seq,
signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
/*
@ -147,6 +153,9 @@ struct amdgpu_ring_funcs {
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */
void (*set_priority) (struct amdgpu_ring *ring,
enum amd_sched_priority priority);
};
struct amdgpu_ring {
@ -187,6 +196,12 @@ struct amdgpu_ring {
volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng;
bool has_compute_vm_bug;
atomic_t num_jobs[AMD_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
#endif
@ -197,12 +212,17 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
enum amd_sched_priority priority);
void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
enum amd_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist,
int num_blacklist, struct amdgpu_ring **ring);
int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
int *blacklist, int num_blacklist,
bool lru_pipe_order, struct amdgpu_ring **ring);
void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{


@ -0,0 +1,109 @@
/*
* Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
#include <linux/fdtable.h>
#include <linux/pid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority)
{
switch (amdgpu_priority) {
case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMD_SCHED_PRIORITY_HIGH_HW;
case AMDGPU_CTX_PRIORITY_HIGH:
return AMD_SCHED_PRIORITY_HIGH_SW;
case AMDGPU_CTX_PRIORITY_NORMAL:
return AMD_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_LOW:
case AMDGPU_CTX_PRIORITY_VERY_LOW:
return AMD_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_UNSET:
return AMD_SCHED_PRIORITY_UNSET;
default:
WARN(1, "Invalid context priority %d\n", amdgpu_priority);
return AMD_SCHED_PRIORITY_INVALID;
}
}
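
The same mapping is exercised when userspace asks for a context priority at creation time. A hedged sketch using the context-create uapi (priority field as added in this series; error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Sketch: allocate a context that the kernel will schedule at
 * AMD_SCHED_PRIORITY_HIGH_SW, per amdgpu_to_sched_priority() above. */
static __u32 create_high_prio_ctx(int drm_fd)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	ioctl(drm_fd, DRM_IOCTL_AMDGPU_CTX, &args);
	return args.out.alloc.ctx_id;
}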
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum amd_sched_priority priority)
{
struct file *filp = fcheck(fd);
struct drm_file *file;
struct pid *pid;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
if (!filp)
return -EINVAL;
pid = get_pid(((struct drm_file *)filp->private_data)->pid);
mutex_lock(&adev->ddev->filelist_mutex);
list_for_each_entry(file, &adev->ddev->filelist, lhead) {
if (file->pid != pid)
continue;
fpriv = file->driver_priv;
idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
amdgpu_ctx_priority_override(ctx, priority);
}
mutex_unlock(&adev->ddev->filelist_mutex);
put_pid(pid);
return 0;
}
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = dev->dev_private;
enum amd_sched_priority priority;
int r;
priority = amdgpu_to_sched_priority(args->in.priority);
if (args->in.flags || priority == AMD_SCHED_PRIORITY_INVALID)
return -EINVAL;
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
r = amdgpu_sched_process_priority_override(adev,
args->in.fd,
priority);
break;
default:
DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
r = -EINVAL;
break;
}
return r;
}
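
On the userspace side, the override op takes another DRM fd identifying the target process. A hedged sketch, assuming the caller holds whatever privilege the driver demands of it (e.g. a compositor boosting one of its clients; error handling elided):

#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Sketch: raise every context of the process behind `victim_fd`
 * (another open DRM file descriptor) to high priority. */
static int override_priority(int drm_fd, int victim_fd)
{
	union drm_amdgpu_sched args = {
		.in.op = AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE,
		.in.fd = victim_fd,
		.in.priority = AMDGPU_CTX_PRIORITY_HIGH,
	};

	return ioctl(drm_fd, DRM_IOCTL_AMDGPU_SCHED, &args);
}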


@ -1,5 +1,5 @@
/*
* Copyright 2015 Advanced Micro Devices, Inc.
* Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@ -19,16 +19,16 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Andres Rodriguez <andresx7@gmail.com>
*/
#ifndef _EVENTINIT_H_
#define _EVENTINIT_H_
#ifndef __AMDGPU_SCHED_H__
#define __AMDGPU_SCHED_H__
#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
#include <drm/drmP.h>
void pem_init_feature_info(struct pp_eventmgr *eventmgr);
void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
int pem_register_interrupts(struct pp_eventmgr *eventmgr);
int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
enum amd_sched_priority amdgpu_to_sched_priority(int amdgpu_priority);
int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
#endif /* _EVENTINIT_H_ */
#endif // __AMDGPU_SCHED_H__


@ -169,14 +169,14 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
* @shared: true if we should only sync to the exclusive fence
* @explicit_sync: true if we should only sync to the exclusive fence
*
* Sync to the fence
*/
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner)
void *owner, bool explicit_sync)
{
struct reservation_object_list *flist;
struct dma_fence *f;
@ -191,6 +191,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
f = reservation_object_get_excl(resv);
r = amdgpu_sync_fence(adev, sync, f);
if (explicit_sync)
return r;
flist = reservation_object_get_list(resv);
if (!flist || r)
return r;
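
The new explicit_sync early return is what the "opt out of implicit sync" BO flag from this series hooks into: for a buffer created with AMDGPU_GEM_CREATE_EXPLICIT_SYNC, command submission syncs only to the exclusive fence and skips the shared-fence list. A hedged userspace sketch of allocating such a buffer (error handling elided):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

/* Sketch: create a VRAM BO that opts out of implicit synchronization. */
static __u32 create_explicit_sync_bo(int drm_fd, __u64 size)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_EXPLICIT_SYNC;

	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
	return args.out.handle;
}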


@ -45,7 +45,8 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
int amdgpu_sync_resv(struct amdgpu_device *adev,
struct amdgpu_sync *sync,
struct reservation_object *resv,
void *owner);
void *owner,
bool explicit_sync);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);


@ -15,62 +15,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
TRACE_EVENT(amdgpu_ttm_tt_populate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
TP_ARGS(adev, dma_address, phys_address),
TP_STRUCT__entry(
__field(uint16_t, domain)
__field(uint8_t, bus)
__field(uint8_t, slot)
__field(uint8_t, func)
__field(uint64_t, dma)
__field(uint64_t, phys)
),
TP_fast_assign(
__entry->domain = pci_domain_nr(adev->pdev->bus);
__entry->bus = adev->pdev->bus->number;
__entry->slot = PCI_SLOT(adev->pdev->devfn);
__entry->func = PCI_FUNC(adev->pdev->devfn);
__entry->dma = dma_address;
__entry->phys = phys_address;
),
TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
(unsigned)__entry->domain,
(unsigned)__entry->bus,
(unsigned)__entry->slot,
(unsigned)__entry->func,
(unsigned long long)__entry->dma,
(unsigned long long)__entry->phys)
);
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@ -474,5 +418,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>


@ -1,5 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright Red Hat Inc 2010.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Author : Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>


@ -42,7 +42,9 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_object.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
@ -208,7 +210,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
placement->num_busy_placement = 1;
return;
}
abo = container_of(bo, struct amdgpu_bo, tbo);
abo = ttm_to_amdgpu_bo(bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
if (adev->mman.buffer_funcs &&
@ -256,7 +258,7 @@ gtt:
static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
if (amdgpu_ttm_tt_get_usermm(bo->ttm))
return -EPERM;
@ -288,97 +290,177 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
return addr;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
/**
* amdgpu_find_mm_node - Helper function finds the drm_mm_node
* corresponding to @offset. It also modifies the offset to be
* within the drm_mm_node returned
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
while (*offset >= (mm_node->size << PAGE_SHIFT)) {
*offset -= (mm_node->size << PAGE_SHIFT);
++mm_node;
}
return mm_node;
}
/**
* amdgpu_copy_ttm_mem_to_mem - Helper function for copy
*
* The function copies @size bytes from {src->mem + src->offset} to
* {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
* move and different for a BO to BO copy.
*
* @f: Returns the last fence if multiple jobs are submitted.
*/
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
struct reservation_object *resv,
struct dma_fence **f)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *old_mm, *new_mm;
uint64_t old_start, old_size, new_start, new_size;
unsigned long num_pages;
struct drm_mm_node *src_mm, *dst_mm;
uint64_t src_node_start, dst_node_start, src_node_size,
dst_node_size, src_page_offset, dst_page_offset;
struct dma_fence *fence = NULL;
int r;
BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
int r = 0;
const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE);
if (!ring->ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
old_mm = old_mem->mm_node;
old_size = old_mm->size;
old_start = amdgpu_mm_node_addr(bo, old_mm, old_mem);
src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
src->offset;
src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
src_page_offset = src_node_start & (PAGE_SIZE - 1);
new_mm = new_mem->mm_node;
new_size = new_mm->size;
new_start = amdgpu_mm_node_addr(bo, new_mm, new_mem);
dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
dst->offset;
dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
num_pages = new_mem->num_pages;
mutex_lock(&adev->mman.gtt_window_lock);
while (num_pages) {
unsigned long cur_pages = min(min(old_size, new_size),
(u64)AMDGPU_GTT_MAX_TRANSFER_SIZE);
uint64_t from = old_start, to = new_start;
while (size) {
unsigned long cur_size;
uint64_t from = src_node_start, to = dst_node_start;
struct dma_fence *next;
if (old_mem->mem_type == TTM_PL_TT &&
!amdgpu_gtt_mgr_is_allocated(old_mem)) {
r = amdgpu_map_buffer(bo, old_mem, cur_pages,
old_start, 0, ring, &from);
/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
* begins at an offset, then adjust the size accordingly
*/
cur_size = min3(min(src_node_size, dst_node_size), size,
GTT_MAX_BYTES);
if (cur_size + src_page_offset > GTT_MAX_BYTES ||
cur_size + dst_page_offset > GTT_MAX_BYTES)
cur_size -= max(src_page_offset, dst_page_offset);
/* Map only what needs to be accessed. Map src to window 0 and
* dst to window 1
*/
if (src->mem->mem_type == TTM_PL_TT &&
!amdgpu_gtt_mgr_is_allocated(src->mem)) {
r = amdgpu_map_buffer(src->bo, src->mem,
PFN_UP(cur_size + src_page_offset),
src_node_start, 0, ring,
&from);
if (r)
goto error;
/* Adjust the offset because amdgpu_map_buffer returns
* start of mapped page
*/
from += src_page_offset;
}
if (new_mem->mem_type == TTM_PL_TT &&
!amdgpu_gtt_mgr_is_allocated(new_mem)) {
r = amdgpu_map_buffer(bo, new_mem, cur_pages,
new_start, 1, ring, &to);
if (dst->mem->mem_type == TTM_PL_TT &&
!amdgpu_gtt_mgr_is_allocated(dst->mem)) {
r = amdgpu_map_buffer(dst->bo, dst->mem,
PFN_UP(cur_size + dst_page_offset),
dst_node_start, 1, ring,
&to);
if (r)
goto error;
to += dst_page_offset;
}
r = amdgpu_copy_buffer(ring, from, to,
cur_pages * PAGE_SIZE,
bo->resv, &next, false, true);
r = amdgpu_copy_buffer(ring, from, to, cur_size,
resv, &next, false, true);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
num_pages -= cur_pages;
if (!num_pages)
size -= cur_size;
if (!size)
break;
old_size -= cur_pages;
if (!old_size) {
old_start = amdgpu_mm_node_addr(bo, ++old_mm, old_mem);
old_size = old_mm->size;
src_node_size -= cur_size;
if (!src_node_size) {
src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
src->mem);
src_node_size = (src_mm->size << PAGE_SHIFT);
} else {
old_start += cur_pages * PAGE_SIZE;
src_node_start += cur_size;
src_page_offset = src_node_start & (PAGE_SIZE - 1);
}
new_size -= cur_pages;
if (!new_size) {
new_start = amdgpu_mm_node_addr(bo, ++new_mm, new_mem);
new_size = new_mm->size;
dst_node_size -= cur_size;
if (!dst_node_size) {
dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
dst->mem);
dst_node_size = (dst_mm->size << PAGE_SHIFT);
} else {
new_start += cur_pages * PAGE_SIZE;
dst_node_start += cur_size;
dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
}
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
if (f)
*f = dma_fence_get(fence);
dma_fence_put(fence);
return r;
}
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
struct ttm_mem_reg *new_mem,
struct ttm_mem_reg *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_copy_mem src, dst;
struct dma_fence *fence = NULL;
int r;
src.bo = bo;
dst.bo = bo;
src.mem = old_mem;
dst.mem = new_mem;
src.offset = 0;
dst.offset = 0;
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
bo->resv, &fence);
if (r)
goto error;
r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
dma_fence_put(fence);
return r;
error:
mutex_unlock(&adev->mman.gtt_window_lock);
if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence);
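
The chunking in amdgpu_ttm_copy_mem_to_mem() clamps every iteration to the two mm_node remainders, the bytes left, and the GTT transfer window, then shrinks the chunk again if a sub-page offset would push the mapping past that window. A standalone model of that clamp (assumes the 512-page window of AMDGPU_GTT_MAX_TRANSFER_SIZE and 4 KiB pages):

#include <stdio.h>

#define PAGE_SZ   4096ULL
#define GTT_MAX   (512ULL * PAGE_SZ)	/* assumed transfer window */

/* Model of the per-iteration copy-size clamp. */
static unsigned long long chunk(unsigned long long src_left,
				unsigned long long dst_left,
				unsigned long long size,
				unsigned long long src_off,
				unsigned long long dst_off)
{
	unsigned long long cur = size;

	if (src_left < cur) cur = src_left;
	if (dst_left < cur) cur = dst_left;
	if (GTT_MAX < cur)  cur = GTT_MAX;

	/* keep offset + size inside the mapped window */
	if (cur + src_off > GTT_MAX || cur + dst_off > GTT_MAX)
		cur -= (src_off > dst_off) ? src_off : dst_off;
	return cur;
}

int main(void)
{
	/* a maximal chunk with a 1 KiB source offset shrinks by 1 KiB */
	printf("%llu\n", chunk(1ULL << 30, 1ULL << 30, 1ULL << 30, 1024, 0));
	return 0;
}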
@ -483,7 +565,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
int r;
/* Can't move a pinned BO */
abo = container_of(bo, struct amdgpu_bo, tbo);
abo = ttm_to_amdgpu_bo(bo);
if (WARN_ON_ONCE(abo->pin_count > 0))
return -EINVAL;
@ -581,13 +663,12 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct drm_mm_node *mm = bo->mem.mm_node;
uint64_t size = mm->size;
uint64_t offset = page_offset;
struct drm_mm_node *mm;
unsigned long offset = (page_offset << PAGE_SHIFT);
page_offset = do_div(offset, size);
mm += offset;
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
mm = amdgpu_find_mm_node(&bo->mem, &offset);
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
(offset >> PAGE_SHIFT);
}
/*
@ -608,6 +689,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
uint32_t last_set_pages;
struct list_head list;
};
@ -621,6 +703,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE;
down_read(&current->mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@ -628,8 +712,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
if (!vma || vma->vm_file || vma->vm_end < end)
if (!vma || vma->vm_file || vma->vm_end < end) {
up_read(&current->mm->mmap_sem);
return -EPERM;
}
}
do {
@ -656,42 +742,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages);
up_read(&current->mm->mmap_sem);
return 0;
release_pages:
release_pages(pages, pinned);
up_read(&current->mm->mmap_sem);
return r;
}
static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
for (i = 0; i < ttm->num_pages; i++) {
trace_amdgpu_ttm_tt_populate(
adev,
gtt->ttm.dma_address[i],
page_to_phys(ttm->pages[i]));
}
gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
for (i = 0; i < ttm->num_pages; ++i) {
if (ttm->pages[i])
put_page(ttm->pages[i]);
ttm->pages[i] = pages ? pages[i] : NULL;
}
}
static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
for (i = 0; i < ttm->num_pages; i++) {
trace_amdgpu_ttm_tt_unpopulate(
adev,
gtt->ttm.dma_address[i],
page_to_phys(ttm->pages[i]));
}
for (i = 0; i < ttm->num_pages; ++i) {
struct page *page = ttm->pages[i];
if (!page)
continue;
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
set_page_dirty(page);
mark_page_accessed(page);
}
}
@ -721,8 +809,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
amdgpu_trace_dma_map(ttm);
return 0;
release_sg:
@ -734,7 +820,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@ -747,16 +832,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
struct page *page = sg_page_iter_page(&sg_iter);
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
set_page_dirty(page);
mark_page_accessed(page);
put_page(page);
}
amdgpu_trace_dma_unmap(ttm);
amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
}
@ -818,7 +894,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg tmp;
struct ttm_placement placement;
struct ttm_place placements;
int r;
@ -834,7 +909,8 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
@ -941,8 +1017,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@ -962,52 +1036,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
r = 0;
goto trace_mappings;
return 0;
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
r = ttm_dma_populate(&gtt->ttm, adev->dev);
goto trace_mappings;
return ttm_dma_populate(&gtt->ttm, adev->dev);
}
#endif
r = ttm_pool_populate(ttm);
if (r) {
return r;
}
for (i = 0; i < ttm->num_pages; i++) {
gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
while (i--) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
return -EFAULT;
}
}
r = 0;
trace_mappings:
if (likely(!r))
amdgpu_trace_dma_map(ttm);
return r;
return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
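For reference, the switch away from the open-coded pci_map_page() loop lands in a new TTM helper; this is only a rough sketch of its effect, not the exact TTM code (the real implementation lives in drivers/gpu/drm/ttm/ttm_page_alloc.c and also tries to coalesce contiguous pages into huge-page DMA mappings):

static int sketch_populate_and_map(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i;
	int r = ttm_pool_populate(&tt->ttm);

	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; i++) {
		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			/* unwind the mappings made so far */
			while (i--)
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}
	}
	return 0;
}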
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
@@ -1018,8 +1066,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
amdgpu_trace_dma_unmap(ttm);
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +1073,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
for (i = 0; i < ttm->num_pages; i++) {
if (gtt->ttm.dma_address[i]) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
}
ttm_pool_unpopulate(ttm);
ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -1051,6 +1090,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
gtt->last_set_pages = 0;
return 0;
}
@@ -1103,6 +1143,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
if (gtt == NULL || !gtt->userptr)
return false;
return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
}
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1143,9 +1193,6 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
unsigned long num_pages = bo->mem.num_pages;
struct drm_mm_node *node = bo->mem.mm_node;
if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
return ttm_bo_eviction_valuable(bo, place);
switch (bo->mem.mem_type) {
case TTM_PL_TT:
return true;
@@ -1160,7 +1207,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
num_pages -= node->size;
++node;
}
break;
return false;
default:
break;
@@ -1173,9 +1220,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
unsigned long offset,
void *buf, int len, int write)
{
struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct drm_mm_node *nodes = abo->tbo.mem.mm_node;
struct drm_mm_node *nodes;
uint32_t value = 0;
int ret = 0;
uint64_t pos;
@@ -1184,10 +1231,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
while (offset >= (nodes->size << PAGE_SHIFT)) {
offset -= nodes->size << PAGE_SHIFT;
++nodes;
}
nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
pos = (nodes->start << PAGE_SHIFT) + offset;
while (len && pos < adev->mc.mc_vram_size) {
@@ -1202,14 +1246,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
}
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
if (!write || mask != 0xffffffff)
value = RREG32(mmMM_DATA);
value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
WREG32(mmMM_DATA, value);
WREG32_NO_KIQ(mmMM_DATA, value);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) {
@@ -1286,6 +1330,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
/*
* The reserved VRAM for firmware must be pinned to the specified
* place in VRAM, so reserve it early.
*/
r = amdgpu_fw_reserve_vram_init(adev);
if (r) {
return r;
}
r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&adev->stolen_vga_memory,
@@ -1510,7 +1563,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
job->vm_needs_flush = vm_needs_flush;
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
AMDGPU_FENCE_OWNER_UNDEFINED,
false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1557,8 +1611,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
/* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
uint32_t max_bytes = 8 *
adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1590,8 +1644,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
++mm_node;
}
/* 10 double words for each SDMA_OP_PTEPDE cmd */
num_dw = num_loops * 10;
/* num of dwords for each SDMA_OP_PTEPDE cmd */
num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */
num_dw += 64;
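With the CIK SDMA values that appear further down in this diff (set_max_nums_pte_pde = 0x1fffff >> 3, set_pte_pde_num_dw = 10), the IB sizing above works out as in this standalone arithmetic check; the 256 MiB fill size is a made-up figure, and the real code accumulates num_loops across the BO's drm_mm_node chunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t byte_count = 256ULL << 20;		/* hypothetical fill size */
	uint32_t max_nums_pte_pde = 0x1fffff >> 3;	/* 262143 entries per cmd */
	uint32_t max_bytes = 8 * max_nums_pte_pde;	/* each PTE covers 8 bytes */
	unsigned num_loops = (unsigned)((byte_count + max_bytes - 1) / max_bytes);
	unsigned num_dw = num_loops * 10 + 64;		/* 10 dw per cmd + IB padding */

	printf("loops=%u, ib dwords=%u\n", num_loops, num_dw);
	return 0;
}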
@@ -1602,7 +1656,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
AMDGPU_FENCE_OWNER_UNDEFINED);
AMDGPU_FENCE_OWNER_UNDEFINED, false);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -1697,9 +1751,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
WREG32(mmMM_INDEX_HI, *pos >> 31);
value = RREG32(mmMM_DATA);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
@@ -1715,10 +1769,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
if (*pos >= adev->mc.mc_vram_size)
return -ENXIO;
while (size) {
unsigned long flags;
uint32_t value;
if (*pos >= adev->mc.mc_vram_size)
return result;
r = get_user(value, (uint32_t *)buf);
if (r)
return r;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
WREG32_NO_KIQ(mmMM_DATA, value);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
result += 4;
buf += 4;
*pos += 4;
size -= 4;
}
return result;
}
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
.llseek = default_llseek
.write = amdgpu_ttm_vram_write,
.llseek = default_llseek,
};
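With the write handler added above, the VRAM blob becomes read-write from userspace. A minimal probe, assuming debugfs is mounted and DRM minor 0 (the path and the offset are assumptions; both fops reject sizes and offsets that are not 4-byte aligned):

#define _XOPEN_SOURCE 600
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t v = 0xdeadbeef, r = 0;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDWR);

	if (fd < 0)
		return 1;
	pwrite(fd, &v, 4, 0x1000);	/* poke a dword into VRAM */
	pread(fd, &r, 4, 0x1000);	/* read it back through MM_INDEX/MM_DATA */
	printf("read back 0x%08x\n", r);
	close(fd);
	return 0;
}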
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@@ -1770,6 +1864,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
int r;
uint64_t phys;
struct iommu_domain *dom;
// always return 8 bytes
if (size != 8)
return -EINVAL;
// only accept page addresses
if (*pos & 0xFFF)
return -EINVAL;
dom = iommu_get_domain_for_dev(adev->dev);
if (dom)
phys = iommu_iova_to_phys(dom, *pos);
else
phys = *pos;
r = copy_to_user(buf, &phys, 8);
if (r)
return -EFAULT;
return 8;
}
static const struct file_operations amdgpu_ttm_iova_fops = {
.owner = THIS_MODULE,
.read = amdgpu_iova_to_phys_read,
.llseek = default_llseek
};
static const struct {
char *name;
const struct file_operations *fops;
int domain;
} ttm_debugfs_entries[] = {
{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
#endif
{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
};
#endif
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@@ -1780,22 +1921,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_vram_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
adev->mman.vram = ent;
for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
ent = debugfs_create_file(
ttm_debugfs_entries[count].name,
S_IFREG | S_IRUGO, root,
adev,
ttm_debugfs_entries[count].fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
i_size_write(ent->d_inode, adev->mc.mc_vram_size);
else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.debugfs_entries[count] = ent;
}
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
adev, &amdgpu_ttm_gtt_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
i_size_write(ent->d_inode, adev->mc.gart_size);
adev->mman.gtt = ent;
#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1805,7 +1945,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
return 0;
#endif
}
@@ -1813,14 +1952,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
debugfs_remove(adev->mman.vram);
adev->mman.vram = NULL;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
debugfs_remove(adev->mman.gtt);
adev->mman.gtt = NULL;
#endif
for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}


@@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
#include "amdgpu.h"
#include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized;
#if defined(CONFIG_DEBUG_FS)
struct dentry *vram;
struct dentry *gtt;
struct dentry *debugfs_entries[8];
#endif
/* buffer handling */
@@ -58,6 +58,12 @@ struct amdgpu_mman {
struct amd_sched_entity entity;
};
struct amdgpu_copy_mem {
struct ttm_buffer_object *bo;
struct ttm_mem_reg *mem;
unsigned long offset;
};
extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
@@ -72,6 +78,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
struct reservation_object *resv,
struct dma_fence **fence, bool direct_submit,
bool vm_needs_flush);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
struct amdgpu_copy_mem *src,
struct amdgpu_copy_mem *dst,
uint64_t size,
struct reservation_object *resv,
struct dma_fence **f);
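A hypothetical call site for the helper just declared (sbo, dbo, size and resv are placeholders, and both BOs must be reserved by the caller); it copies `size` bytes between two BOs starting at byte offsets within their current placements:

struct amdgpu_copy_mem src = { .bo = &sbo->tbo, .mem = &sbo->tbo.mem, .offset = 0 };
struct amdgpu_copy_mem dst = { .bo = &dbo->tbo, .mem = &dbo->tbo.mem, .offset = 0 };
struct dma_fence *fence = NULL;
int r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, size, resv, &fence);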
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint64_t src_data,
struct reservation_object *resv,
@@ -82,4 +94,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
#endif


@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
if (load_type != 2)
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
uint64_t fw_mc_addr;
void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0;
int i, err;
struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
goto failed;
}
err = amdgpu_bo_reserve(*bo, false);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
goto failed_reserve;
}
err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.fw_buf_mc);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
}
err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
goto failed_kmap;
}
amdgpu_bo_unreserve(*bo);
}
err = amdgpu_bo_reserve(*bo, false);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
goto failed_reserve;
}
err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
&fw_mc_addr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
goto failed_pin;
}
err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
goto failed_kmap;
}
amdgpu_bo_unreserve(*bo);
memset(fw_buf_ptr, 0, adev->firmware.fw_size);
memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
header = (const struct common_firmware_header *)ucode->fw->data;
amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
(void *)((uint8_t *)fw_buf_ptr + fw_offset));
amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
fw_buf_ptr + fw_offset);
amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);


@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i;
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.ring);
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
release_firmware(adev->uvd.fw);
return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL) {
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
return -EINVAL;
return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
if (mapping == NULL) {
r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
return -EINVAL;
return r;
}
start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
r = amdgpu_cs_sysvm_access_required(parser);
if (r)
return r;
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;


@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
int r;
if (index == 0xffffffff)
index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
mapping = amdgpu_cs_find_mapping(p, addr, &bo);
if (mapping == NULL) {
r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
if (r) {
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
addr, lo, hi, size, index);
return -EINVAL;
return r;
}
if ((addr + (uint64_t)size) >
@@ -647,15 +648,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
int i, r, idx = 0;
int i, r = 0, idx = 0;
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
r = amdgpu_cs_sysvm_access_required(p);
if (r)
return r;
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);


@@ -25,30 +25,26 @@
#include "amdgpu_vf_error.h"
#include "mxgpu_ai.h"
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
/* struct error_entry - amdgpu VF error information. */
struct amdgpu_vf_error_buffer {
int read_count;
int write_count;
uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
struct amdgpu_vf_error_buffer admgpu_vf_errors;
void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data)
void amdgpu_vf_error_put(struct amdgpu_device *adev,
uint16_t sub_error_code,
uint16_t error_flags,
uint64_t error_data)
{
int index;
uint16_t error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
uint16_t error_code;
index = admgpu_vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
admgpu_vf_errors.code [index] = error_code;
admgpu_vf_errors.flags [index] = error_flags;
admgpu_vf_errors.data [index] = error_data;
admgpu_vf_errors.write_count ++;
if (!amdgpu_sriov_vf(adev))
return;
error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
mutex_lock(&adev->virt.vf_errors.lock);
index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
adev->virt.vf_errors.code [index] = error_code;
adev->virt.vf_errors.flags [index] = error_flags;
adev->virt.vf_errors.data [index] = error_data;
adev->virt.vf_errors.write_count ++;
mutex_unlock(&adev->virt.vf_errors.lock);
}
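The buffer is a 16-entry overwrite ring: writers only ever bump write_count, and the drain path below snaps read_count forward when more than a full buffer of errors has piled up, so only the newest entries are sent. A standalone model of that index arithmetic (the counter values are made up):

#include <stdio.h>

#define ENTRY_SIZE 16	/* mirrors AMDGPU_VF_ERROR_ENTRY_SIZE */

int main(void)
{
	int write_count = 37, read_count = 0;

	/* reader fell behind: only the newest ENTRY_SIZE slots survive */
	if (write_count - read_count > ENTRY_SIZE)
		read_count = write_count - ENTRY_SIZE;
	while (read_count < write_count)
		printf("drain slot %d\n", read_count++ % ENTRY_SIZE);
	return 0;
}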
@@ -58,7 +54,8 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
u32 data1, data2, data3;
int index;
if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) || (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
(!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
return;
}
/*
@@ -68,18 +65,22 @@ void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
return;
}
*/
mutex_lock(&adev->virt.vf_errors.lock);
/* The ring overwrites old entries; if the reader is more than a full buffer behind, fast-forward read_count. */
if (admgpu_vf_errors.write_count - admgpu_vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
admgpu_vf_errors.read_count = admgpu_vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
}
while (admgpu_vf_errors.read_count < admgpu_vf_errors.write_count) {
index =admgpu_vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX (admgpu_vf_errors.code[index], admgpu_vf_errors.flags[index]);
data2 = admgpu_vf_errors.data[index] & 0xFFFFFFFF;
data3 = (admgpu_vf_errors.data[index] >> 32) & 0xFFFFFFFF;
while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
index =adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
adev->virt.vf_errors.flags[index]);
data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
admgpu_vf_errors.read_count ++;
adev->virt.vf_errors.read_count ++;
}
mutex_unlock(&adev->virt.vf_errors.lock);
}


@@ -56,7 +56,10 @@ enum AMDGIM_ERROR_CATEGORY {
AMDGIM_ERROR_CATEGORY_MAX
};
void amdgpu_vf_error_put(uint16_t sub_error_code, uint16_t error_flags, uint64_t error_data);
void amdgpu_vf_error_put(struct amdgpu_device *adev,
uint16_t sub_error_code,
uint16_t error_flags,
uint64_t error_data);
void amdgpu_vf_error_trans_all (struct amdgpu_device *adev);
#endif /* __VF_ERROR_H__ */


@@ -22,7 +22,7 @@
*/
#include "amdgpu.h"
#define MAX_KIQ_REG_WAIT 100000
#define MAX_KIQ_REG_WAIT 100000000 /* in usecs */
int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
{
@@ -114,27 +114,25 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r;
uint32_t val;
struct dma_fence *f;
unsigned long flags;
uint32_t val, seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
mutex_lock(&kiq->ring_mutex);
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_rreg(ring, reg);
amdgpu_fence_emit(ring, &f);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
mutex_unlock(&kiq->ring_mutex);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
dma_fence_put(f);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1) {
DRM_ERROR("wait for kiq fence error: %ld.\n", r);
DRM_ERROR("wait for kiq fence error: %ld\n", r);
return ~0;
}
val = adev->wb.wb[adev->virt.reg_val_offs];
return val;
@@ -143,23 +141,23 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
signed long r;
struct dma_fence *f;
unsigned long flags;
uint32_t seq;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_wreg);
mutex_lock(&kiq->ring_mutex);
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
amdgpu_fence_emit(ring, &f);
amdgpu_fence_emit_polling(ring, &seq);
amdgpu_ring_commit(ring);
mutex_unlock(&kiq->ring_mutex);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = dma_fence_wait_timeout(f, false, msecs_to_jiffies(MAX_KIQ_REG_WAIT));
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
if (r < 1)
DRM_ERROR("wait for kiq fence error: %ld.\n", r);
dma_fence_put(f);
DRM_ERROR("wait for kiq fence error: %ld\n", r);
}
/**
@@ -274,3 +272,80 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
int amdgpu_virt_fw_reserve_get_checksum(void *obj,
unsigned long obj_size,
unsigned int key,
unsigned int chksum)
{
unsigned int ret = key;
unsigned long i = 0;
unsigned char *pos;
pos = (char *)obj;
/* calculate checksum */
for (i = 0; i < obj_size; ++i)
ret += *(pos + i);
/* minus the chksum itself */
pos = (char *)&chksum;
for (i = 0; i < sizeof(chksum); ++i)
ret -= *(pos + i);
return ret;
}
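The checksum convention above is subtle: the checksum field itself is byte-summed along with the rest of the object, and the helper then subtracts the caller-supplied value back out. A standalone round-trip showing why writing with chksum = 0 and verifying with the stored value both come out consistent (struct blob and the key are made-up stand-ins):

#include <stdio.h>

static unsigned int get_checksum(void *obj, unsigned long obj_size,
				 unsigned int key, unsigned int chksum)
{
	unsigned int ret = key;
	unsigned char *pos = obj;
	unsigned long i;

	for (i = 0; i < obj_size; ++i)
		ret += pos[i];
	/* minus the chksum bytes themselves */
	pos = (unsigned char *)&chksum;
	for (i = 0; i < sizeof(chksum); ++i)
		ret -= pos[i];
	return ret;
}

struct blob {
	unsigned int checksum;
	char payload[16];
};

int main(void)
{
	struct blob b = { 0, "hello" };
	unsigned int key = 0x1234;	/* stands in for checksum_key */

	/* producer: checksum computed while the checksum field is zero */
	b.checksum = get_checksum(&b, sizeof(b), key, 0);
	/* consumer: the stored value is subtracted back out by the helper */
	printf("valid: %d\n",
	       get_checksum(&b, sizeof(b), key, b.checksum) == b.checksum);
	return 0;
}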
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
uint32_t pf2vf_ver = 0;
uint32_t pf2vf_size = 0;
uint32_t checksum = 0;
uint32_t checkval;
char *str;
adev->virt.fw_reserve.p_pf2vf = NULL;
adev->virt.fw_reserve.p_vf2pf = NULL;
if (adev->fw_vram_usage.va != NULL) {
adev->virt.fw_reserve.p_pf2vf =
(struct amdgim_pf2vf_info_header *)(
adev->fw_vram_usage.va + AMDGIM_DATAEXCHANGE_OFFSET);
pf2vf_ver = adev->virt.fw_reserve.p_pf2vf->version;
AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
/* the pf2vf message must fit within 4K */
if (pf2vf_size > 0 && pf2vf_size < 4096) {
checkval = amdgpu_virt_fw_reserve_get_checksum(
adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
adev->virt.fw_reserve.checksum_key, checksum);
if (checkval == checksum) {
adev->virt.fw_reserve.p_vf2pf =
((void *)adev->virt.fw_reserve.p_pf2vf +
pf2vf_size);
memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
sizeof(amdgim_vf2pf_info));
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
AMDGPU_FW_VRAM_VF2PF_VER);
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
sizeof(amdgim_vf2pf_info));
AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
&str);
#ifdef MODULE
if (THIS_MODULE->version != NULL)
strcpy(str, THIS_MODULE->version);
else
#endif
strcpy(str, "N/A");
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
0);
AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
amdgpu_virt_fw_reserve_get_checksum(
adev->virt.fw_reserve.p_vf2pf,
pf2vf_size,
adev->virt.fw_reserve.checksum_key, 0));
}
}
}
}


@@ -36,6 +36,18 @@ struct amdgpu_mm_table {
uint64_t gpu_addr;
};
#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
/* struct error_entry - amdgpu VF error information. */
struct amdgpu_vf_error_buffer {
struct mutex lock;
int read_count;
int write_count;
uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
};
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
@@ -46,6 +58,179 @@ struct amdgpu_virt_ops {
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
};
/*
* Firmware Reserve Frame buffer
*/
struct amdgpu_virt_fw_reserve {
struct amdgim_pf2vf_info_header *p_pf2vf;
struct amdgim_vf2pf_info_header *p_vf2pf;
unsigned int checksum_key;
};
/*
* Definition between PF and VF
* Structures forcibly aligned to 4 to keep the same style as PF.
*/
#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
(total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
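AMDGIM_GET_STRUCTURE_RESERVED_SIZE counts in 32-bit words: `total` is the target structure size in dwords, and each per-type count is converted to dwords (four u8s or two u16s per dword, two dwords per u64). A standalone check of the two reserved[] sizes used below (the "+ 2" accounts for the 8-byte header):

#include <stdio.h>

#define RESERVED(total, u8, u16, u32, u64) \
	((total) - (((u8) + 3) / 4 + ((u16) + 1) / 2 + (u32) + (u64) * 2))

int main(void)
{
	/* pf2vf v2: 9 u32 fields plus the 2-dword header, 3 u64 fields */
	printf("pf2vf v2 reserved dwords: %d\n", RESERVED(256, 0, 0, 9 + 2, 3));
	/* vf2pf v2: 64 u8s (driver_version), 12 u32s plus header, no u64s */
	printf("vf2pf v2 reserved dwords: %d\n", RESERVED(256, 64, 0, 12 + 2, 0));
	return 0;
}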
enum AMDGIM_FEATURE_FLAG {
/* GIM supports feature of Error log collecting */
AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
/* GIM supports feature of loading uCodes */
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
};
struct amdgim_pf2vf_info_header {
/* the total structure size in bytes. */
uint32_t size;
/* version of this structure, written by the GIM */
uint32_t version;
} __aligned(4);
struct amdgim_pf2vf_info_v1 {
/* header contains size and version */
struct amdgim_pf2vf_info_header header;
/* max_width * max_height */
unsigned int uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
unsigned int uvd_enc_max_bandwidth;
/* max_width * max_height */
unsigned int vce_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
unsigned int vce_enc_max_bandwidth;
/* MEC FW position in kb from the start of visible frame buffer */
unsigned int mecfw_kboffset;
/* The features flags of the GIM driver supports. */
unsigned int feature_flags;
/* use private key from mailbox 2 to create checksum */
unsigned int checksum;
} __aligned(4);
struct amdgim_pf2vf_info_v2 {
/* header contains size and version */
struct amdgim_pf2vf_info_header header;
/* use private key from mailbox 2 to create checksum */
uint32_t checksum;
/* The features flags of the GIM driver supports. */
uint32_t feature_flags;
/* max_width * max_height */
uint32_t uvd_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
uint32_t uvd_enc_max_bandwidth;
/* max_width * max_height */
uint32_t vce_enc_max_pixels_count;
/* 16x16 pixels/sec, codec independent */
uint32_t vce_enc_max_bandwidth;
/* MEC FW position in kb from the start of VF visible frame buffer */
uint64_t mecfw_kboffset;
/* MEC FW size in KB */
uint32_t mecfw_ksize;
/* UVD FW position in kb from the start of VF visible frame buffer */
uint64_t uvdfw_kboffset;
/* UVD FW size in KB */
uint32_t uvdfw_ksize;
/* VCE FW position in kb from the start of VF visible frame buffer */
uint64_t vcefw_kboffset;
/* VCE FW size in KB */
uint32_t vcefw_ksize;
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
} __aligned(4);
struct amdgim_vf2pf_info_header {
/* the total structure size in bytes. */
uint32_t size;
/* version of this structure, written by the guest */
uint32_t version;
} __aligned(4);
struct amdgim_vf2pf_info_v1 {
/* header contains size and version */
struct amdgim_vf2pf_info_header header;
/* driver version */
char driver_version[64];
/* driver certification, 1=WHQL, 0=None */
unsigned int driver_cert;
/* guest OS type and version: need a define */
unsigned int os_info;
/* in the unit of 1M */
unsigned int fb_usage;
/* guest gfx engine usage percentage */
unsigned int gfx_usage;
/* guest gfx engine health percentage */
unsigned int gfx_health;
/* guest compute engine usage percentage */
unsigned int compute_usage;
/* guest compute engine health percentage */
unsigned int compute_health;
/* guest vce engine usage percentage. 0xffff means N/A. */
unsigned int vce_enc_usage;
/* guest vce engine health percentage. 0xffff means N/A. */
unsigned int vce_enc_health;
/* guest uvd engine usage percentage. 0xffff means N/A. */
unsigned int uvd_enc_usage;
/* guest uvd engine health percentage. 0xffff means N/A. */
unsigned int uvd_enc_health;
unsigned int checksum;
} __aligned(4);
struct amdgim_vf2pf_info_v2 {
/* header contains size and version */
struct amdgim_vf2pf_info_header header;
uint32_t checksum;
/* driver version */
uint8_t driver_version[64];
/* driver certification, 1=WHQL, 0=None */
uint32_t driver_cert;
/* guest OS type and version: need a define */
uint32_t os_info;
/* in the unit of 1M */
uint32_t fb_usage;
/* guest gfx engine usage percentage */
uint32_t gfx_usage;
/* guest gfx engine health percentage */
uint32_t gfx_health;
/* guest compute engine usage percentage */
uint32_t compute_usage;
/* guest compute engine health percentage */
uint32_t compute_health;
/* guest vce engine usage percentage. 0xffff means N/A. */
uint32_t vce_enc_usage;
/* guest vce engine health percentage. 0xffff means N/A. */
uint32_t vce_enc_health;
/* guest uvd engine usage percentage. 0xffff means N/A. */
uint32_t uvd_enc_usage;
/* guest uvd engine health percentage. 0xffff means N/A. */
uint32_t uvd_enc_health;
uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
} __aligned(4);
#define AMDGPU_FW_VRAM_VF2PF_VER 2
typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info;
#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
do { \
((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
} while (0)
#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
do { \
(*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
} while (0)
#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
do { \
if (!adev->virt.fw_reserve.p_pf2vf) \
*(val) = 0; \
else { \
if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
*(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
*(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
} \
} while (0)
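A usage sketch for the read accessor (kernel context; adev is assumed valid and the pf2vf region already mapped by amdgpu_virt_init_data_exchange()). Because feature_flags exists in both v1 and v2, the macro's version dispatch keeps call sites layout-agnostic:

uint32_t features = 0;

AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &features);
if (features & AMDGIM_FEATURE_ERROR_LOG_COLLECT)
	; /* the host GIM will drain errors queued via amdgpu_vf_error_put() */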
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
@@ -59,6 +244,8 @@ struct amdgpu_virt {
struct work_struct flr_work;
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
};
#define AMDGPU_CSA_SIZE (8 * 1024)
@@ -101,5 +288,9 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
#endif

File diff suppressed because it is too large


@@ -25,6 +25,7 @@
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
#include <linux/idr.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
@@ -72,6 +73,16 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
/* For Raven */
#define AMDGPU_MTYPE_CC 2
#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
| AMDGPU_PTE_SNOOPED \
| AMDGPU_PTE_EXECUTABLE \
| AMDGPU_PTE_READABLE \
| AMDGPU_PTE_WRITEABLE \
| AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_CC))
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
@@ -105,17 +116,24 @@ struct amdgpu_vm_bo_base {
/* protected by spinlock */
struct list_head vm_status;
/* protected by the BO being reserved */
bool moved;
};
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
struct amdgpu_vm_bo_base base;
uint64_t addr;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
unsigned last_entry_used;
struct amdgpu_vm_pt *entries;
unsigned last_entry_used;
};
#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
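The fault key packs the 16-bit PASID above a 48-bit, page-aligned address; these keys are what gets pushed into the `faults` kfifo declared further down. A standalone round-trip with a fabricated fault (uint64_t stands in for the kernel's u64):

#include <stdint.h>
#include <stdio.h>

#define VM_FAULT(pasid, addr)	(((uint64_t)(pasid) << 48) | (addr))
#define VM_FAULT_PASID(fault)	((uint64_t)(fault) >> 48)
#define VM_FAULT_ADDR(fault)	((uint64_t)(fault) & 0xfffffffff000ULL)

int main(void)
{
	/* hypothetical fault: PASID 0x42 at a page-aligned GPU VA */
	uint64_t key = VM_FAULT(0x42, 0x123456789000ULL);

	printf("pasid=0x%llx addr=0x%llx\n",
	       (unsigned long long)VM_FAULT_PASID(key),
	       (unsigned long long)VM_FAULT_ADDR(key));
	return 0;
}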
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -123,19 +141,21 @@ struct amdgpu_vm {
/* protecting invalidated */
spinlock_t status_lock;
/* BOs who needs a validation */
struct list_head evicted;
/* PT BOs which relocated and their parent need an update */
struct list_head relocated;
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
/* BOs cleared in the PT because of a move */
struct list_head cleared;
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_pt root;
struct dma_fence *last_dir_update;
uint64_t last_eviction_counter;
struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
@@ -143,8 +163,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
/* client id */
/* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id;
unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -153,6 +174,12 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
/* Up to 128 pending retry page faults */
DECLARE_KFIFO(faults, u64, 128);
/* Limit non-retry fault storms */
unsigned int fault_credit;
};
struct amdgpu_vm_id {
@@ -215,16 +242,27 @@ struct amdgpu_vm_manager {
* BIT1[= 0] Compute updated by SDMA [= 1] by CPU
*/
int vm_update_mode;
/* PASID to VM mapping, will be used in interrupt context to
* look up VM of a page fault
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
};
int amdgpu_vm_alloc_pasid(unsigned int bits);
void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context);
int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
@@ -243,13 +281,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo);
struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +307,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,


@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
idx = 0x80;
str = CSTR(idx);
if (*str != '\0')
if (*str != '\0') {
pr_info("ATOM BIOS: %s\n", str);
strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
}
return ctx;
}


@@ -140,6 +140,7 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
char vbios_version[20];
};
extern int amdgpu_atom_debug;


@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
return ret;
}
static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
pi->uvd_power_gated = gate;
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
}
}
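This is the pattern repeated throughout the rest of this file: the amd_pm_funcs callbacks take an opaque handle shared with the powerplay layer, so each entry point now begins by recovering the device pointer. A minimal sketch of the convention (some_dpm_callback is hypothetical):

static int some_dpm_callback(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* ... operate on adev exactly as the old signature did ... */
	return 0;
}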
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
static bool ci_dpm_vblank_too_short(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
static int ci_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
static int ci_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
static u32 ci_dpm_get_fan_control_mode(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
}
static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
static int ci_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
static int ci_dpm_pre_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
static void ci_dpm_post_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
}
static int ci_dpm_set_power_state(struct amdgpu_device *adev)
static int ci_dpm_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
static void ci_dpm_display_configuration_changed(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ci_program_display_gap(adev);
}
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
}
static void
ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
ci_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
static void ci_dpm_print_power_state(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct ci_ps *ps = ci_get_ps(rps);
struct ci_pl *pl;
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
static int ci_check_state_equal(struct amdgpu_device *adev,
struct amdgpu_ps *cps,
struct amdgpu_ps *rps,
static int ci_check_state_equal(void *handle,
void *current_ps,
void *request_ps,
bool *equal)
{
struct ci_ps *ci_cps;
struct ci_ps *ci_rps;
int i;
struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
ci_cps = ci_get_ps(cps);
ci_rps = ci_get_ps(rps);
ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
if (ci_cps == NULL) {
*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
return 0;
}
static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
static u32 ci_dpm_get_sclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
static u32 ci_dpm_get_mclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
}
/* get temperature in millidegrees */
static int ci_dpm_get_temp(struct amdgpu_device *adev)
static int ci_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
ci_dpm_set_dpm_funcs(adev);
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6346,7 +6365,6 @@ static int ci_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
mutex_lock(&adev->pm.mutex);
amdgpu_pm_sysfs_fini(adev);
ci_dpm_fini(adev);
mutex_unlock(&adev->pm.mutex);
@@ -6551,9 +6569,10 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
static int ci_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6637,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
return size;
}
static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
static int ci_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6684,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
return 0;
}
static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
static int ci_dpm_get_sclk_od(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6701,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
return value;
}
static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6720,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
static int ci_dpm_get_mclk_od(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6737,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
return value;
}
static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6756,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !query)
@@ -6851,9 +6876,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
return result;
}
static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;
@@ -6906,9 +6932,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
return 0;
}
static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !request)
@@ -6927,9 +6954,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
return -EINVAL;
}
static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};
@@ -6944,11 +6972,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
return 0;
}
static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
u32 activity_percent = 50;
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -7003,7 +7032,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
const struct amd_pm_funcs ci_dpm_funcs = {
.get_temperature = &ci_dpm_get_temp,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -7035,12 +7064,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.read_sensor = ci_dpm_read_sensor,
};
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
if (adev->pm.funcs == NULL)
adev->pm.funcs = &ci_dpm_funcs;
}
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
.set = ci_dpm_set_interrupt_state,
.process = ci_dpm_process_interrupt,


@@ -26,5 +26,6 @@
extern const struct amd_ip_funcs ci_dpm_ip_funcs;
extern const struct amd_ip_funcs kv_dpm_ip_funcs;
extern const struct amd_pm_funcs ci_dpm_funcs;
extern const struct amd_pm_funcs kv_dpm_funcs;
#endif


@@ -228,6 +228,34 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
/**
* cik_ih_prescreen_iv - prescreen an interrupt vector
*
* @adev: amdgpu_device pointer
*
* Returns true if the interrupt vector should be further processed.
*/
static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
{
u32 ring_index = adev->irq.ih.rptr >> 2;
u16 pasid;
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
case 146:
case 147:
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
return true;
break;
default:
/* Not a VM fault */
return true;
}
adev->irq.ih.rptr += 16;
return false;
}
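Each IH vector is four little-endian dwords; source ids 146 and 147 are the CIK VM-fault sources, and they carry the PASID in the top half of the third dword. A standalone decode of a fabricated entry (on a little-endian host, le32_to_cpu is a no-op, which this sketch assumes):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical IH ring entry for a VM fault from PASID 0x42 */
	uint32_t iv[4] = { 146, 0, 0x00420000, 0 };
	uint32_t src_id = iv[0] & 0xff;
	uint16_t pasid = iv[2] >> 16;

	if (src_id == 146 || src_id == 147)
		printf("VM fault, pasid=0x%x\n", pasid);
	return 0;
}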
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -433,6 +461,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
.prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};


@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
.copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
.write_pte = cik_sdma_vm_write_pte,
.set_max_nums_pte_pde = 0x1fffff >> 3,
.set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};


@@ -207,6 +207,34 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
/**
* cz_ih_prescreen_iv - prescreen an interrupt vector
*
* @adev: amdgpu_device pointer
*
* Returns true if the interrupt vector should be further processed.
*/
static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
{
u32 ring_index = adev->irq.ih.rptr >> 2;
u16 pasid;
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
case 146:
case 147:
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
return true;
break;
default:
/* Not a VM fault */
return true;
}
adev->irq.ih.rptr += 16;
return false;
}
/**
* cz_ih_decode_iv - decode an interrupt vector
*
@@ -414,6 +442,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
.prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};


@@ -288,7 +288,7 @@ dce_virtual_encoder(struct drm_connector *connector)
if (connector->encoder_ids[i] == 0)
break;
encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
encoder = drm_encoder_find(connector->dev, NULL, connector->encoder_ids[i]);
if (!encoder)
continue;
@@ -298,7 +298,7 @@ dce_virtual_encoder(struct drm_connector *connector)
/* pick the first one */
if (enc_id)
return drm_encoder_find(connector->dev, enc_id);
return drm_encoder_find(connector->dev, NULL, enc_id);
return NULL;
}


@@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@ -125,24 +126,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@ -918,8 +934,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
if (err == -ENOENT) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
}
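/* Polaris parts ship a second CP firmware revision: the "_2" images are
 * tried first and the legacy names are only a fallback when
 * request_firmware() returns -ENOENT. The same pattern repeats below for
 * ME, CE, MEC and MEC2.
 */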
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@ -929,8 +954,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
if (err == -ENOENT) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.me_fw);
@ -941,8 +975,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
if (err == -ENOENT) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@ -1012,8 +1055,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
if (err == -ENOENT) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
}
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@ -1025,8 +1077,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
if (err == -ENOENT) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
}
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
@ -2053,6 +2114,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
@ -3891,10 +3953,10 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indices,
&indices_count,
sizeof(unique_indices) / sizeof(int),
ARRAY_SIZE(unique_indices),
indirect_start_offsets,
&offset_count,
sizeof(indirect_start_offsets)/sizeof(int));
ARRAY_SIZE(indirect_start_offsets));
/* save and restore list */
WREG32_FIELD(RLC_SRM_CNTL, AUTO_INCR_ADDR, 1);
@ -3916,14 +3978,14 @@ static int gfx_v8_0_init_save_restore_list(struct amdgpu_device *adev)
/* write the starting offsets to RLC scratch ram */
WREG32(mmRLC_GPM_SCRATCH_ADDR,
adev->gfx.rlc.starting_offsets_start);
for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(mmRLC_GPM_SCRATCH_DATA,
indirect_start_offsets[i]);
/* unique indices */
temp = mmRLC_SRM_INDEX_CNTL_ADDR_0;
data = mmRLC_SRM_INDEX_CNTL_DATA_0;
for (i = 0; i < sizeof(unique_indices) / sizeof(int); i++) {
for (i = 0; i < ARRAY_SIZE(unique_indices); i++) {
if (unique_indices[i] != 0) {
WREG32(temp + i, unique_indices[i] & 0x3FFFF);
WREG32(data + i, unique_indices[i] >> 20);
@ -4071,18 +4133,12 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
gfx_v8_0_rlc_reset(adev);
gfx_v8_0_init_pg(adev);
if (!adev->pp_enabled) {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
/* legacy rlc firmware loading */
r = gfx_v8_0_rlc_load_microcode(adev);
if (r)
return r;
} else {
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
AMDGPU_UCODE_ID_RLC_G);
if (r)
return -EINVAL;
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy rlc firmware loading */
r = gfx_v8_0_rlc_load_microcode(adev);
if (r)
return r;
}
gfx_v8_0_rlc_start(adev);
@ -4577,12 +4633,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
if (!(adev->flags & AMD_IS_APU)) {
mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
}
mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@ -4753,7 +4807,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
if (adev->gfx.in_reset) { /* for GPU_RESET case */
if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@ -4790,7 +4844,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@ -4802,7 +4856,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@ -4900,43 +4954,15 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
if (!(adev->flags & AMD_IS_APU))
gfx_v8_0_enable_gui_idle_interrupt(adev, false);
if (!adev->pp_enabled) {
if (adev->firmware.load_type != AMDGPU_FW_LOAD_SMU) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
/* legacy firmware loading */
r = gfx_v8_0_cp_gfx_load_microcode(adev);
if (r)
return r;
r = gfx_v8_0_cp_gfx_load_microcode(adev);
if (r)
return r;
r = gfx_v8_0_cp_compute_load_microcode(adev);
if (r)
return r;
} else {
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
AMDGPU_UCODE_ID_CP_CE);
if (r)
return -EINVAL;
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
AMDGPU_UCODE_ID_CP_PFP);
if (r)
return -EINVAL;
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
AMDGPU_UCODE_ID_CP_ME);
if (r)
return -EINVAL;
if (adev->asic_type == CHIP_TOPAZ) {
r = gfx_v8_0_cp_compute_load_microcode(adev);
if (r)
return r;
} else {
r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
AMDGPU_UCODE_ID_CP_MEC1);
if (r)
return -EINVAL;
}
}
r = gfx_v8_0_cp_compute_load_microcode(adev);
if (r)
return r;
}
r = gfx_v8_0_cp_gfx_resume(adev);
@ -4975,12 +5001,69 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
}
static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t scratch, tmp = 0;
int r, i;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
DRM_ERROR("Failed to get scratch reg (%d).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(kiq_ring, 10);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
/* unmap queues */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
/* write to scratch for completion */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
amdgpu_ring_commit(kiq_ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
if (i >= adev->usec_timeout) {
DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
r = -EINVAL;
}
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
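/* The handshake above: seed a scratch register with 0xCAFEDEAD, ask the
 * KIQ to reset/unmap the queue via PACKET3_UNMAP_QUEUES, then let the same
 * submission overwrite the scratch with 0xDEADBEEF. Polling for that value
 * confirms the CP consumed the whole packet before hw_fini tears the
 * compute rings down.
 */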
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
/* disable KCQ to keep the CPC from touching memory that is no longer valid */
for (i = 0; i < adev->gfx.num_compute_rings; i++)
gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@ -5902,7 +5985,6 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
{
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@ -5920,7 +6002,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@ -5941,7 +6024,8 @@ static int gfx_v8_0_tonga_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@ -5953,7 +6037,6 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
uint32_t msg_id, pp_state = 0;
uint32_t pp_support_state = 0;
void *pp_handle = adev->powerplay.pp_handle;
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_CGLS)) {
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGLS) {
@ -5971,7 +6054,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CG,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_3D_CGCG | AMD_CG_SUPPORT_GFX_3D_CGLS)) {
@ -5990,7 +6074,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_3D,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & (AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS)) {
@ -6011,7 +6096,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_MG,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_RLC_LS) {
@ -6026,7 +6112,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_RLC,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
@ -6040,7 +6127,8 @@ static int gfx_v8_0_polaris_update_gfx_clock_gating(struct amdgpu_device *adev,
PP_BLOCK_GFX_CP,
pp_support_state,
pp_state);
amd_set_clockgating_by_smu(pp_handle, msg_id);
if (adev->powerplay.pp_funcs->set_clockgating_by_smu)
amdgpu_dpm_set_clockgating_by_smu(adev, msg_id);
}
return 0;
@ -6307,6 +6395,104 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
bool acquire)
{
struct amdgpu_device *adev = ring->adev;
int pipe_num, tmp, reg;
int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
/* first me only has 2 entries, GFX and HP3D */
if (ring->me > 0)
pipe_num -= 2;
reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
tmp = RREG32(reg);
tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
WREG32(reg, tmp);
}
static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
bool acquire)
{
int i, pipe;
bool reserve;
struct amdgpu_ring *iring;
mutex_lock(&adev->gfx.pipe_reserve_mutex);
pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
if (acquire)
set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
else
clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
/* Clear all reservations - everyone reacquires all resources */
for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
true);
for (i = 0; i < adev->gfx.num_compute_rings; ++i)
gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
true);
} else {
/* Lower all pipes without a current reservation */
for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
iring = &adev->gfx.gfx_ring[i];
pipe = amdgpu_gfx_queue_to_bit(adev,
iring->me,
iring->pipe,
0);
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
}
for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
iring = &adev->gfx.compute_ring[i];
pipe = amdgpu_gfx_queue_to_bit(adev,
iring->me,
iring->pipe,
0);
reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
gfx_v8_0_ring_set_pipe_percent(iring, reserve);
}
}
mutex_unlock(&adev->gfx.pipe_reserve_mutex);
}
static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
bool acquire)
{
uint32_t pipe_priority = acquire ? 0x2 : 0x0;
uint32_t queue_priority = acquire ? 0xf : 0x0;
mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
enum amd_sched_priority priority)
{
struct amdgpu_device *adev = ring->adev;
bool acquire = priority == AMD_SCHED_PRIORITY_HIGH_HW;
if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
return;
gfx_v8_0_hqd_set_priority(adev, ring, acquire);
gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
}
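/* Priority acquisition works on two levels: the HQD registers raise this
 * queue's own pipe/queue priority, while the pipe_reserve bitmap throttles
 * every non-reserved pipe to SPI_WCL_PIPE_PERCENT_GFX = 0x1 until the last
 * reservation is dropped and all pipes are restored to full rate.
 */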
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@ -6752,6 +6938,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.set_priority = gfx_v8_0_ring_set_priority_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@ -6960,7 +7147,7 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
uint64_t ce_payload_addr;
int cnt_ce;
static union {
union {
struct vi_ce_ib_state regular;
struct vi_ce_ib_state_chained_ib chained;
} ce_payload = {};
@ -6989,7 +7176,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
uint64_t de_payload_addr, gds_addr, csa_addr;
int cnt_de;
static union {
union {
struct vi_de_ib_state regular;
struct vi_de_ib_state_chained_ib chained;
} de_payload = {};
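/* Dropping "static" moves these scratch unions onto the stack; a single
 * shared static buffer could be clobbered when two rings emit their CE/DE
 * metadata concurrently. The gfx_v9_0 ce/de helpers below get the same
 * treatment.
 */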

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@ -20,6 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
@ -66,38 +67,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
static const u32 golden_settings_gc_9_0[] =
@ -352,6 +385,25 @@ err1:
return r;
}
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
release_firmware(adev->gfx.pfp_fw);
adev->gfx.pfp_fw = NULL;
release_firmware(adev->gfx.me_fw);
adev->gfx.me_fw = NULL;
release_firmware(adev->gfx.ce_fw);
adev->gfx.ce_fw = NULL;
release_firmware(adev->gfx.rlc_fw);
adev->gfx.rlc_fw = NULL;
release_firmware(adev->gfx.mec_fw);
adev->gfx.mec_fw = NULL;
release_firmware(adev->gfx.mec2_fw);
adev->gfx.mec2_fw = NULL;
kfree(adev->gfx.rlc.register_list_format);
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@ -1120,30 +1172,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
int r;
u32 data;
u32 size;
u32 base;
u32 data, base;
if (!amdgpu_ngg)
return 0;
/* Program buffer size */
data = 0;
size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
size = adev->gfx.ngg.buf[NGG_POS].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
adev->gfx.ngg.buf[NGG_POS].size >> 8);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
data = 0;
size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
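/* The shifts replace the old divisions: INDEX/POS/CNTL_SB buffer sizes are
 * programmed in 256-byte units (>> 8) and the PARAM buffer in 1KB units
 * (>> 10), matching the size / 256 and size / 1024 math this supersedes.
 */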
/* Program buffer base address */
@ -1306,7 +1350,10 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
if (!i)
sprintf(ring->name, "gfx");
else
sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
@ -1346,7 +1393,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as well as KIQ for SRIOV case */
r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
if (r)
return r;
@ -1398,9 +1445,11 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
gfx_v9_0_free_microcode(adev);
return 0;
}
@ -1682,10 +1731,10 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
adev->gfx.rlc.reg_list_format_size_bytes >> 2,
unique_indirect_regs,
&unique_indirect_reg_count,
sizeof(unique_indirect_regs)/sizeof(int),
ARRAY_SIZE(unique_indirect_regs),
indirect_start_offsets,
&indirect_start_offsets_count,
sizeof(indirect_start_offsets)/sizeof(int));
ARRAY_SIZE(indirect_start_offsets));
/* enable auto inc in case it is disabled */
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
@ -1722,12 +1771,12 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
/* write the starting offsets to RLC scratch ram */
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR),
adev->gfx.rlc.starting_offsets_start);
for (i = 0; i < sizeof(indirect_start_offsets)/sizeof(int); i++)
for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA),
indirect_start_offsets[i]);
/* load unique indirect regs*/
for (i = 0; i < sizeof(unique_indirect_regs)/sizeof(int); i++) {
for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) {
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i,
unique_indirect_regs[i] & 0x3FFFF);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i,
@ -1740,11 +1789,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
u32 tmp = 0;
tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
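/* WREG32_FIELD15() collapses the old read-modify-write sequence into one
 * macro. A rough sketch of the idea (not the exact soc15 definition):
 *
 *   #define WREG32_FIELD15(ip, idx, reg, field, val)                   \
 *       WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg),                     \
 *              (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) &           \
 *               ~REG_FIELD_MASK(reg, field)) |                         \
 *              ((val) << REG_FIELD_SHIFT(reg, field)))
 */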
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
@ -1822,16 +1867,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true) {
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
} else {
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
data = REG_SET_FIELD(data, RLC_PG_CNTL,
SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
@ -1841,16 +1881,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true) {
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
} else {
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
data = REG_SET_FIELD(data, RLC_PG_CNTL,
SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@ -1860,16 +1895,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true) {
data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
} else {
data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
if(default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
data = REG_SET_FIELD(data, RLC_PG_CNTL,
CP_PG_DISABLE,
enable ? 0 : 1);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@ -1878,10 +1908,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true)
data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
data = REG_SET_FIELD(data, RLC_PG_CNTL,
GFX_POWER_GATING_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@ -1892,10 +1921,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true)
data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
data = REG_SET_FIELD(data, RLC_PG_CNTL,
GFX_PIPELINE_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
@ -1910,10 +1938,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true)
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
data = REG_SET_FIELD(data, RLC_PG_CNTL,
STATIC_PER_CU_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@ -1924,10 +1951,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
if (enable == true)
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
data = REG_SET_FIELD(data, RLC_PG_CNTL,
DYN_PER_CU_PG_ENABLE,
enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@ -1967,13 +1993,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
gfx_v9_0_wait_for_rlc_serdes(adev);
}
@ -2045,8 +2066,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
if (amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev)) {
gfx_v9_0_init_csb(adev);
return 0;
}
gfx_v9_0_rlc_stop(adev);
@ -2463,6 +2486,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
mqd->dynamic_cu_mask_addr_lo =
lower_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
mqd->dynamic_cu_mask_addr_hi =
upper_32_bits(ring->mqd_gpu_addr
+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@ -2486,10 +2516,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
}
else
} else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
}
mqd->cp_hqd_pq_doorbell_control = tmp;
@ -2692,10 +2722,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
if (adev->gfx.in_reset) { /* for GPU_RESET case */
if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@ -2707,7 +2737,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
memset((void *)mqd, 0, sizeof(*mqd));
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@ -2716,7 +2748,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
@ -2728,8 +2760,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@ -2737,11 +2771,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@ -2882,12 +2916,70 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = kiq_ring->adev;
uint32_t scratch, tmp = 0;
int r, i;
r = amdgpu_gfx_scratch_get(adev, &scratch);
if (r) {
DRM_ERROR("Failed to get scratch reg (%d).\n", r);
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(kiq_ring, 10);
if (r) {
DRM_ERROR("Failed to lock KIQ (%d).\n", r);
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
/* unmap queues */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
amdgpu_ring_write(kiq_ring, 0);
/* write to scratch for completion */
amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
amdgpu_ring_commit(kiq_ring);
for (i = 0; i < adev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
break;
DRM_UDELAY(1);
}
if (i >= adev->usec_timeout) {
DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
r = -EINVAL;
}
amdgpu_gfx_scratch_free(adev, scratch);
return r;
}
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
/* disable KCQ to keep the CPC from touching memory that is no longer valid */
for (i = 0; i < adev->gfx.num_compute_rings; i++)
gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
@ -2930,15 +3022,10 @@ static bool gfx_v9_0_is_idle(void *handle)
static int gfx_v9_0_wait_for_idle(void *handle)
{
unsigned i;
u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
GRBM_STATUS__GUI_ACTIVE_MASK;
if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
if (gfx_v9_0_is_idle(handle))
return 0;
udelay(1);
}
@ -3497,9 +3584,11 @@ static void gfx_v9_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
u32 ref_and_mask, reg_mem_engine;
struct nbio_hdp_flush_reg *nbio_hf_reg;
const struct nbio_hdp_flush_reg *nbio_hf_reg;
if (ring->adev->asic_type == CHIP_VEGA10)
if (ring->adev->flags & AMD_IS_APU)
nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
else
nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@ -3528,7 +3617,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
gfx_v9_0_write_data_to_reg(ring, 0, true,
SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
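/* Writing 1 to mmHDP_READ_CACHE_INVALIDATE is presumably the architectural
 * way to invalidate the HDP read cache on SOC15; the old code poked
 * HDP_DEBUG0 for the same effect.
 */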
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@ -3718,7 +3807,7 @@ static void gfx_v9_ring_emit_sb(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
{
static struct v9_ce_ib_state ce_payload = {0};
struct v9_ce_ib_state ce_payload = {0};
uint64_t csa_addr;
int cnt;
@ -3737,7 +3826,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
{
static struct v9_de_ib_state de_payload = {0};
struct v9_de_ib_state de_payload = {0};
uint64_t csa_addr, gds_addr;
int cnt;
@ -3757,6 +3846,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}
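/* FRAME_CONTROL brackets a command-stream frame: FRAME_CMD(0) marks the
 * start and FRAME_CMD(1) the end, which appears to be what the TMZ
 * (trusted memory zone) logic keys off when this is emitted from
 * emit_cntxcntl.
 */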
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
@ -3764,6 +3859,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
gfx_v9_0_ring_emit_tmz(ring, true);
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@ -3814,12 +3911,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
{
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
}
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;

drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c

@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
if (!value) {
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_NO_RETRY_FAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
CRASH_ON_RETRY_FAULT, 1);
}
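/* When fault handling is disabled (amdgpu_vm_fault_stop == ALWAYS makes
 * value false, see gmc_v9_0_gart_enable), escalate both retry and no-retry
 * VM faults to a hard crash so the offending state can be inspected
 * instead of the fault being silently swallowed.
 */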
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c

@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
amdgpu_vm_adjust_size(adev, 64, 4);
amdgpu_vm_adjust_size(adev, 64, 9);
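/* 64 is the VM size in GB; the second argument is the default fragment
 * size as log2 of 4KB pages, so 9 appears to select 2MB fragments (up from
 * the previous 4 = 64KB) to cut down on PTE update traffic. The same
 * change lands in gmc_v7_0 and gmc_v8_0 below.
 */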
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
gmc_v6_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->mc.fw);
adev->mc.fw = NULL;
return 0;
}

drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c

@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
amdgpu_vm_adjust_size(adev, 64, 4);
amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
gmc_v7_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->mc.fw);
adev->mc.fw = NULL;
return 0;
}

drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c

@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
amdgpu_vm_adjust_size(adev, 64, 4);
amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
gmc_v8_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
release_firmware(adev->mc.fw);
adev->mc.fw = NULL;
return 0;
}

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

@ -32,6 +32,8 @@
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
#include "vega10/MMHUB/mmhub_1_0_offset.h"
#include "vega10/ATHUB/athub_1_0_offset.h"
#include "soc15_common.h"
@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
static const u32 golden_settings_mmhub_1_0_0[] =
{
SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};
static const u32 golden_settings_athub_1_0_0[] =
{
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};
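/* Each golden-settings entry is a {register, and-mask, or-value} triple;
 * amdgpu_program_register_sequence() in effect clears the masked bits and
 * ORs in the value (or writes the value outright when the mask is
 * 0xffffffff).
 */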
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits, i;
u32 tmp, reg, bits, i, j;
bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp &= ~bits;
WREG32(reg, tmp);
}
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp &= ~bits;
WREG32(reg, tmp);
for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp &= ~bits;
WREG32(reg, tmp);
}
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp |= bits;
WREG32(reg, tmp);
for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
hub = &adev->vmhub[j];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp |= bits;
WREG32(reg, tmp);
}
}
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
tmp |= bits;
WREG32(reg, tmp);
}
break;
default:
break;
}
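/* The separate MM HUB / GFX HUB copies were folded into one loop over
 * adev->vmhub[0..AMDGPU_MAX_VMHUBS), so every hub gets its sixteen
 * VM_CONTEXT*_CNTL interrupt-enable bits toggled the same way.
 */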
@ -682,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
amdgpu_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
(const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
amdgpu_program_register_sequence(adev,
golden_settings_athub_1_0_0,
(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
amdgpu_program_register_sequence(adev,
golden_settings_athub_1_0_0,
(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
default:
break;
@ -713,12 +719,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
/* After HDP is initialized, flush HDP.*/
if (adev->flags & AMD_IS_APU)
nbio_v7_0_hdp_flush(adev);
else
nbio_v6_1_hdp_flush(adev);
switch (adev->asic_type) {
case CHIP_RAVEN:
mmhub_v1_0_initialize_power_gating(adev);
@ -736,13 +736,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
/* After HDP is initialized, flush HDP.*/
if (adev->flags & AMD_IS_APU)
nbio_v7_0_hdp_flush(adev);
else
nbio_v6_1_hdp_flush(adev);
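/* The flush now runs only after FLUSH_INVALIDATE_CACHE and
 * HDP_HOST_PATH_CNTL are programmed; the removed block earlier in this
 * function flushed before the HDP setup, presumably risking stale cache
 * contents.
 */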
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@ -751,7 +754,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@ -770,17 +772,11 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
u32 tmp;
/* Lockout access through VGA aperture*/
tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
r = gmc_v9_0_gart_enable(adev);
@ -822,9 +818,7 @@ static int gmc_v9_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_hw_fini(adev);
return 0;
return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)

drivers/gpu/drm/amd/amdgpu/iceland_ih.c

@ -207,6 +207,34 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
/**
* iceland_ih_prescreen_iv - prescreen an interrupt vector
*
* @adev: amdgpu_device pointer
*
* Returns true if the interrupt vector should be further processed.
*/
static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
{
u32 ring_index = adev->irq.ih.rptr >> 2;
u16 pasid;
switch (le32_to_cpu(adev->irq.ih.ring[ring_index]) & 0xff) {
case 146:
case 147:
pasid = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]) >> 16;
if (!pasid || amdgpu_vm_pasid_fault_credit(adev, pasid))
return true;
break;
default:
/* Not a VM fault */
return true;
}
adev->irq.ih.rptr += 16;
return false;
}
/**
* iceland_ih_decode_iv - decode an interrupt vector
*
@ -412,6 +440,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
.prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};

drivers/gpu/drm/amd/amdgpu/kv_dpm.c

@ -42,7 +42,6 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
static void kv_dpm_enable_bapm(void *handle, bool enable)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
return kv_enable_acp_dpm(adev, !gate);
}
static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
return ret;
}
static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
static int kv_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
return 0;
}
static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
static int kv_dpm_pre_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
static int kv_dpm_set_power_state(struct amdgpu_device *adev)
static int kv_dpm_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
static void kv_dpm_post_set_power_state(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
}
static void
kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
kv_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
}
static void
kv_dpm_print_power_state(struct amdgpu_device *adev,
struct amdgpu_ps *rps)
kv_dpm_print_power_state(void *handle, void *request_ps)
{
int i;
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct kv_ps *ps = kv_get_ps(rps);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
static void kv_dpm_display_configuration_changed(void *handle)
{
}
static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
static u32 kv_dpm_get_sclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->levels[requested_state->num_levels - 1].sclk;
}
static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
static u32 kv_dpm_get_mclk(void *handle, bool low)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
static int kv_dpm_get_temp(struct amdgpu_device *adev)
static int kv_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = RREG32_SMC(0xC0300E0C);
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	kv_dpm_set_dpm_funcs(adev);
 	kv_dpm_set_irq_funcs(adev);
 
 	return 0;
@@ -2960,16 +2969,10 @@ static int kv_dpm_late_init(void *handle)
 {
 	/* powerdown unused blocks for now */
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret;
 
 	if (!amdgpu_dpm)
 		return 0;
 
-	/* init the sysfs and debugfs files late */
-	ret = amdgpu_pm_sysfs_init(adev);
-	if (ret)
-		return ret;
-
 	kv_dpm_powergate_acp(adev, true);
 	kv_dpm_powergate_samu(adev, true);
@@ -3031,7 +3034,6 @@ static int kv_dpm_sw_fini(void *handle)
 	flush_work(&adev->pm.dpm.thermal.work);
 
 	mutex_lock(&adev->pm.mutex);
-	amdgpu_pm_sysfs_fini(adev);
 	kv_dpm_fini(adev);
 	mutex_unlock(&adev->pm.mutex);
@@ -3222,14 +3224,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
 		   (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
 }
 
-static int kv_check_state_equal(struct amdgpu_device *adev,
-				struct amdgpu_ps *cps,
-				struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+				void *current_ps,
+				void *request_ps,
 				bool *equal)
 {
 	struct kv_ps *kv_cps;
 	struct kv_ps *kv_rps;
 	int i;
+	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
 		return -EINVAL;
@@ -3262,9 +3267,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
 			      void *value, int *size)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	uint32_t sclk;
 	u32 pl_index =
@@ -3312,7 +3318,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.set_powergating_state = kv_dpm_set_powergating_state,
 };
 
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
 	.get_temperature = &kv_dpm_get_temp,
 	.pre_set_power_state = &kv_dpm_pre_set_power_state,
 	.set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3336,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
 	.read_sensor = &kv_dpm_read_sensor,
 };
 
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
-	if (adev->pm.funcs == NULL)
-		adev->pm.funcs = &kv_dpm_funcs;
-}
-
 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
 	.set = kv_dpm_set_interrupt_state,
 	.process = kv_dpm_process_interrupt,
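
A note on the recurring change in this file: every kv_dpm callback now takes an opaque void *handle instead of a typed struct amdgpu_device *, and recovers the device pointer with a cast on its first line. That is what allows the function table to change from the driver-private struct amdgpu_dpm_funcs to struct amd_pm_funcs, a table type whose entry points are also implemented by the powerplay code. A minimal userspace sketch of the same pattern follows; all example_* names are illustrative, not the kernel's:

/* Opaque-handle callback table: the table stores functions taking
 * void *, and each implementation casts back to its real type. */
#include <stdio.h>

struct example_device {
	int temp_millidegrees;
};

struct example_pm_funcs {
	int (*get_temperature)(void *handle);
};

static int example_get_temp(void *handle)
{
	/* Recover the typed pointer, as kv_dpm_get_temp() does with
	 * struct amdgpu_device *. */
	struct example_device *dev = (struct example_device *)handle;

	return dev->temp_millidegrees;
}

static const struct example_pm_funcs example_funcs = {
	.get_temperature = example_get_temp,
};

int main(void)
{
	struct example_device dev = { .temp_millidegrees = 45000 };

	/* Callers need only the table and an opaque handle. */
	printf("%d\n", example_funcs.get_temperature(&dev));
	return 0;
}

The trade-off is that the compiler can no longer type-check the first argument, which is why the immediate cast at the top of each callback is kept as a strict convention.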

drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c

@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
 	{0x135, 0x12a810},
 	{0x149, 0x7a82c}
 };
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
 
 #define PCTL0_RENG_EXEC_END_PTR 0x151
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
 	{0x1f0, 0x5000a7f6},
 	{0x1f1, 0x5000a7e4}
 };
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
 
 #define PCTL1_RENG_EXEC_END_PTR 0x1f1
 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
 			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	if (!value) {
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				    CRASH_ON_NO_RETRY_FAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				    CRASH_ON_RETRY_FAULT, 1);
+	}
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
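
The hunk just above follows the driver's usual read-modify-write shape: each REG_SET_FIELD() invocation masks and shifts a value into one named field of the staged register value tmp, and only the final WREG32_SOC15() writes the accumulated result to mmVM_L2_PROTECTION_FAULT_CNTL. The new if (!value) branch additionally sets the CRASH_ON_NO_RETRY_FAULT and CRASH_ON_RETRY_FAULT bits whenever the default fault handling is being switched off, so a VM protection fault halts the hub instead of being retried. A simplified stand-in for the field-update step (the SET_FIELD macro and the mask/shift values below are invented for illustration, not the SOC15 definitions):

#include <stdint.h>
#include <stdio.h>

/* Replace the bits selected by 'mask' with 'val' shifted into place;
 * all other bits of 'reg' are preserved. */
#define SET_FIELD(reg, mask, shift, val) \
	(((reg) & ~(mask)) | (((uint32_t)(val) << (shift)) & (mask)))

#define CRASH_ON_RETRY_FAULT_MASK  0x00000002u
#define CRASH_ON_RETRY_FAULT_SHIFT 1

int main(void)
{
	uint32_t tmp = 0;

	/* Stage the field update; a real driver would then write tmp
	 * back with a single register write. */
	tmp = SET_FIELD(tmp, CRASH_ON_RETRY_FAULT_MASK,
			CRASH_ON_RETRY_FAULT_SHIFT, 1);
	printf("0x%08x\n", tmp); /* prints 0x00000002 */
	return 0;
}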
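
Earlier in this file, the PCTL0_DATA_LEN and PCTL1_DATA_LEN hunks swap the open-coded sizeof(x)/sizeof(x[0]) idiom for the kernel's ARRAY_SIZE() macro from include/linux/kernel.h, which computes the same element count but also fails to compile if its argument is a pointer rather than an array. A plain-C illustration of the underlying identity (the kernel's extra type check is omitted here):

#include <stdio.h>

/* Userspace stand-in for the kernel's ARRAY_SIZE(). */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const struct { unsigned int reg, value; } pctl_demo[] = {
	{0x135, 0x12a810},
	{0x149, 0x7a82c},
};

int main(void)
{
	/* Prints 2: the element count, independent of element size. */
	printf("%zu\n", ARRAY_SIZE(pctl_demo));
	return 0;
}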

Some files were not shown because too many files have changed in this diff.