Merge tag 'drm-fixes-2021-03-26' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "As expected last week things were overly quiet so this week things
  seem to have caught up. It still isn't too major.

  msm and amdgpu lead the size here, the msm fixes are pretty varied
  across the driver, the amdgpu one is mostly the S0ix fixes with some
  other minor ones. Otherwise there are a few i915 fixes and one each
  for nouveau, etnaviv and rcar-du.

  msm:
   - pll fixes
   - shutdown hook fix
   - runtime resume fix
   - clear_oob fix
   - kms locking fix
   - display aux retry fix

  rcar-du:
   - warn_on in encoder init fix

  etnaviv:
   - Use FOLL_FORCE and FOLL_LONGTERM

  i915:
   - DisplayPort LTTPR fixes around link training and limiting it
     according to supported spec version.
   - Fix enabled_planes bitmask to really represent only logically
     enabled planes.
   - Fix DSS CTL registers for ICL DSI transcoders
   - Fix the GT fence revocation runtime PM logic.

  nouveau:
   - cursor size regression fix

  amdgpu:
   - S0ix fixes
   - Add PCI ID
   - Polaris PCIe DPM fix
   - Display fix for high refresh rate monitors"

* tag 'drm-fixes-2021-03-26' of git://anongit.freedesktop.org/drm/drm: (37 commits)
  drm/nouveau/kms/nve4-nv108: Limit cursors to 128x128
  drm/i915: Fix the GT fence revocation runtime PM logic
  drm/amdgpu/display: restore AUX_DPHY_TX_CONTROL for DCN2.x
  drm/amdgpu: Add additional Sienna Cichlid PCI ID
  drm/amd/pm: workaround for audio noise issue
  drm/i915/dsc: fix DSS CTL register usage for ICL DSI transcoders
  drm/i915: Fix enabled_planes bitmask
  drm/i915: Disable LTTPR support when the LTTPR rev < 1.4
  drm/i915: Disable LTTPR support when the DPCD rev < 1.4
  drm/i915/ilk-glk: Fix link training on links with LTTPRs
  drm/msm/disp/dpu1: icc path needs to be set before dpu runtime resume
  drm/amdgpu: skip kfd suspend/resume for S0ix
  drm/amdgpu: drop S0ix checks around CG/PG in suspend
  drm/amdgpu: skip CG/PG for gfx during S0ix
  drm/amdgpu: update comments about s0ix suspend/resume
  drm/amdgpu/swsmu: skip gfx cgpg on s0ix suspend
  drm/amdgpu: re-enable suspend phase 2 for S0ix
  drm/amdgpu: move s0ix check into amdgpu_device_ip_suspend_phase2 (v3)
  drm/amdgpu: clean up non-DC suspend/resume handling
  drm/amdgpu: don't evict vram on APUs for suspend to ram (v4)
  ...
Linus Torvalds 2021-03-26 11:05:18 -07:00
commit f944d061f8
41 changed files with 616 additions and 265 deletions


@ -1007,13 +1007,9 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
bool in_hibernate;
/*
* The combination flag in_poweroff_reboot_com used to identify the poweroff
* and reboot opt in the s0i3 system-wide suspend.
*/
bool in_poweroff_reboot_com;
bool in_s3;
bool in_s4;
bool in_s0ix;
atomic_t in_gpu_reset;
enum pp_mp1_state mp1_state;


@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip CG for GFX on S0ix */
if (adev->in_s0ix &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
/* skip PG for GFX on S0ix */
if (adev->in_s0ix &&
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
if (adev->in_poweroff_reboot_com ||
!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
if (adev->in_s0ix)
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
continue;
}
/* skip suspend of gfx and psp for S0ix
* gfx is in gfxoff state, so on resume it will exit gfxoff just
* like at runtime. PSP is also part of the always on hardware
* so no need to suspend it.
*/
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
continue;
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
*/
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
struct amdgpu_device *adev;
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_device *adev = drm_to_adev(dev);
int r;
adev = drm_to_adev(dev);
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
cancel_delayed_work_sync(&adev->delayed_init_work);
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_OFF);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
}
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
}
}
}
}
amdgpu_ras_suspend(adev);
r = amdgpu_device_ip_suspend_phase1(adev);
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
if (adev->in_poweroff_reboot_com ||
!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
* using the CPU.
@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
*/
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_crtc *crtc;
int r = 0;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
if (amdgpu_acpi_is_s0ix_supported(adev))
if (adev->in_s0ix)
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */
@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
if (!amdgpu_device_has_dc_support(adev)) {
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
}
}
if (!adev->in_s0ix) {
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
return r;
}
r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
if (r)
return r;
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
/* pre DCE11 */
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_ON);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
}
if (fbcon)
amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);


@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode);
}
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
int r;
/* turn off display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_OFF);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
}
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
robj = gem_to_amdgpu_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
amdgpu_bo_unreserve(robj);
}
}
}
return r;
}
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_crtc *crtc;
int r;
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
r = amdgpu_bo_reserve(aobj, true);
if (r == 0) {
r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
if (r != 0)
dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
amdgpu_bo_unreserve(aobj);
}
}
}
drm_helper_resume_force_mode(dev);
/* turn on display hw */
drm_modeset_lock_all(dev);
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter)
drm_helper_connector_dpms(connector,
DRM_MODE_DPMS_ON);
drm_connector_list_iter_end(&iter);
drm_modeset_unlock_all(dev);
return 0;
}


@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier);
int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
int amdgpu_display_resume_helper(struct amdgpu_device *adev);
#endif


@ -1107,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
/* Van Gogh */
@ -1274,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
*/
if (!amdgpu_passthrough(adev))
adev->mp1_state = PP_MP1_STATE_UNLOAD;
adev->in_poweroff_reboot_com = true;
amdgpu_device_ip_suspend(adev);
adev->in_poweroff_reboot_com = false;
adev->mp1_state = PP_MP1_STATE_NONE;
}
static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
return amdgpu_device_suspend(drm_dev, true);
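/* Record which system-wide suspend flavour this is: in_s0ix marks
 * s2idle/modern standby (only set when ACPI advertises S0ix support),
 * in_s3 marks classic suspend-to-RAM.  The rest of the suspend path keys
 * off these flags instead of the old in_poweroff_reboot_com heuristic.
 */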
if (amdgpu_acpi_is_s0ix_supported(adev))
adev->in_s0ix = true;
adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_s3 = false;
return r;
}
static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
return amdgpu_device_resume(drm_dev, true);
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_supported(adev))
adev->in_s0ix = false;
return r;
}
static int amdgpu_pmops_freeze(struct device *dev)
@ -1300,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
adev->in_hibernate = true;
adev->in_s4 = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_hibernate = false;
adev->in_s4 = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
@ -1318,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
static int amdgpu_pmops_poweroff(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
adev->in_poweroff_reboot_com = true;
r = amdgpu_device_suspend(drm_dev, true);
adev->in_poweroff_reboot_com = false;
return r;
return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)


@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
struct ttm_resource_manager *man;
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
if (adev->flags & AMD_IS_APU) {
/* Useless to evict on IGP chips */
if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
/* No need to evict vram on APUs for suspend to ram */
return 0;
}
#endif
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
return ttm_resource_manager_evict_all(&adev->mman.bdev, man);


@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle)
static int dce_v10_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
if (ret)
return ret;
return ret;
return amdgpu_display_resume_helper(adev);
}
static bool dce_v10_0_is_idle(void *handle)


@ -3027,6 +3027,11 @@ static int dce_v11_0_hw_fini(void *handle)
static int dce_v11_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
if (ret)
return ret;
return ret;
return amdgpu_display_resume_helper(adev);
}
static bool dce_v11_0_is_idle(void *handle)


@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle)
static int dce_v6_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
if (ret)
return ret;
return ret;
return amdgpu_display_resume_helper(adev);
}
static bool dce_v6_0_is_idle(void *handle)


@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle)
static int dce_v8_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
adev->mode_info.bl_level =
amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
if (ret)
return ret;
return ret;
return amdgpu_display_resume_helper(adev);
}
static bool dce_v8_0_is_idle(void *handle)


@ -39,6 +39,7 @@
#include "dce_v11_0.h"
#include "dce_virtual.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_display.h"
#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
static int dce_virtual_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = amdgpu_display_suspend_helper(adev);
if (r)
return r;
return dce_virtual_hw_fini(handle);
}
static int dce_virtual_resume(void *handle)
{
return dce_virtual_hw_init(handle);
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
r = dce_virtual_hw_init(handle);
if (r)
return r;
return amdgpu_display_resume_helper(adev);
}
static bool dce_virtual_is_idle(void *handle)


@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
} else {
AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
}
//AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;


@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
tmp, MC_CG_ARB_FREQ_F0);
}
static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint16_t pcie_gen = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
pcie_gen = 2;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
pcie_gen = 1;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
pcie_gen = 0;
return pcie_gen;
}
static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
uint16_t pcie_width = 0;
if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
pcie_width = 16;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
pcie_width = 12;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
pcie_width = 8;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
pcie_width = 4;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
pcie_width = 2;
else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
pcie_width = 1;
return pcie_width;
}
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
PP_Min_PCIEGen),
get_pcie_lane_support(data->pcie_lane_cap,
PP_Max_PCIELane));
if (data->pcie_dpm_key_disabled)
phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
data->dpm_table.pcie_speed_table.count,
smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
}
return 0;
}
@ -1248,6 +1295,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
NULL)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Disable,
NULL)),
"Failed to disble pcie DPM during DPM Start Function!",
return -EINVAL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,


@ -54,6 +54,9 @@
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#define smnPCIE_LC_SPEED_CNTL 0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
#define HBM_MEMORY_CHANNEL_WIDTH 128
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
if (!data->registry_data.pcie_dpm_key_disabled)
data->smu_features[GNLD_DPM_LINK].supported = true;
data->smu_features[GNLD_DPM_LINK].supported = true;
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
@ -1544,6 +1546,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width;
}
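/* Part of the PCIe DPM rework in this pull: when PCIe link DPM is masked
 * off via ppfeaturemask, pin every link level to the already-computed
 * highest gen/width; the link-DPM SMU feature itself is disabled in
 * vega10_start_dpm() further down.
 */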
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
}
return 0;
}
@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
if (data->registry_data.pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
"Attempt to Disable Link DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}
@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
return 0;
}
static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}
static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
>> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}
static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, char *buf)
{
@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
PPTable_t *pptable = &(data->smc_state_table.pp_table);
int i, now, size = 0, count = 0;
@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
current_gen_speed =
vega10_get_current_pcie_link_speed_level(hwmgr);
current_lane_width =
vega10_get_current_pcie_link_width_level(hwmgr);
for (i = 0; i < NUM_LINK_LEVELS; i++) {
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
(pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
(pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
(i == now) ? "*" : "");
size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
(gen_speed == 3) ? "16.0GT/s," : "",
(lane_width == 1) ? "x1" :
(lane_width == 2) ? "x2" :
(lane_width == 3) ? "x4" :
(lane_width == 4) ? "x8" :
(lane_width == 5) ? "x12" :
(lane_width == 6) ? "x16" : "",
(current_gen_speed == gen_speed) &&
(current_lane_width == lane_width) ?
"*" : "");
}
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size = sprintf(buf, "%s:\n", "OD_SCLK");


@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}
static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
/* override to the highest if it's disabled from ppfeaturmask */
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
ret = vega12_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
PP_ASSERT_WITH_CODE(!ret,
"Attempt to Disable DPM LINK Failed!",
return ret);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}


@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
data->counter_gfxoff = 0;
data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
}
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
pp_table->PcieLaneCount[i] = pcie_width_arg;
}
/* override to the highest if it's disabled from ppfeaturmask */
if (data->registry_data.pcie_dpm_key_disabled) {
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
pp_table->PcieGenSpeed[i] = pcie_gen;
pp_table->PcieLaneCount[i] = pcie_width;
}
ret = vega20_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
PP_ASSERT_WITH_CODE(!ret,
"Attempt to Disable DPM LINK Failed!",
return ret);
data->smu_features[GNLD_DPM_LINK].enabled = false;
data->smu_features[GNLD_DPM_LINK].supported = false;
}
return 0;
}


@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((amdgpu_in_reset(adev) &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
/*
* For custom pptable uploading, skip the DPM features
@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
if (smu->is_apu)
/* skip CGPG when in S0ix */
if (smu->is_apu && !adev->in_s0ix)
smu_set_gfx_cgpg(&adev->smu, false);
return 0;


@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct page **pages = pvec + pinned;
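/* Userptr pages stay pinned for the lifetime of the BO, so this is a
 * long-term pin: FOLL_LONGTERM lets GUP treat it accordingly (e.g. keep
 * the pages out of CMA regions).  FOLL_WRITE | FOLL_FORCE is now passed
 * regardless of userptr->ro, forcing the COW break at pin time.
 */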
ret = pin_user_pages_fast(ptr, num_pages,
!userptr->ro ? FOLL_WRITE : 0, pages);
FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
pages);
if (ret < 0) {
unpin_user_pages(pvec, pinned);
kvfree(pvec);


@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
return 0;
new_crtc_state->enabled_planes |= BIT(plane->id);
ret = plane->check_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
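/* Only a plane that actually has a framebuffer attached is logically
 * enabled, so set the enabled_planes bit here (after check_plane) rather
 * than unconditionally above; see "Fix enabled_planes bitmask" in the
 * shortlog.
 */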
if (fb)
new_crtc_state->enabled_planes |= BIT(plane->id);
/* FIXME pre-g4x don't work like this */
if (new_plane_state->uapi.visible)
new_crtc_state->active_planes |= BIT(plane->id);


@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
int ret;
intel_dp_lttpr_init(intel_dp);
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
return false;
/*


@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
else
precharge = 5;
/* Max timeout value on G4x-BDW: 1.6ms */
if (IS_BROADWELL(dev_priv))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
/*
* Max timeout values:
* SKL-GLK: 1.6ms
* CNL: 3.2ms
* ICL+: 4ms
*/
ret = DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_INTERRUPT |


@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
link_status[3], link_status[4], link_status[5]);
}
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
}
static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
{
intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
intel_dp->lttpr_common_caps) < 0) {
memset(intel_dp->lttpr_common_caps, 0,
sizeof(intel_dp->lttpr_common_caps));
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (intel_dp_is_edp(intel_dp))
return false;
}
/*
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (INTEL_GEN(i915) < 10)
return false;
if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
intel_dp->lttpr_common_caps) < 0)
goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
"LTTPR common capabilities: %*ph\n",
(int)sizeof(intel_dp->lttpr_common_caps),
intel_dp->lttpr_common_caps);
/* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
if (intel_dp->lttpr_common_caps[0] < 0x14)
goto reset_caps;
return true;
reset_caps:
intel_dp_reset_lttpr_common_caps(intel_dp);
return false;
}
static bool
@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
}
/**
* intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
* intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
* @intel_dp: Intel DP struct
*
* Read the LTTPR common capabilities, switch to non-transparent link training
* mode if any is detected and read the PHY capabilities for all detected
* LTTPRs. In case of an LTTPR detection error or if the number of
* Read the LTTPR common and DPRX capabilities and switch to non-transparent
* link training mode if any is detected and read the PHY capabilities for all
* detected LTTPRs. In case of an LTTPR detection error or if the number of
* LTTPRs is more than is supported (8), fall back to the no-LTTPR,
* transparent mode link training mode.
*
* Returns:
* >0 if LTTPRs were detected and the non-transparent LT mode was set
* >0 if LTTPRs were detected and the non-transparent LT mode was set. The
* DPRX capabilities are read out.
* 0 if no LTTPRs or more than 8 LTTPRs were detected or in case of a
* detection failure and the transparent LT mode was set
* detection failure and the transparent LT mode was set. The DPRX
* capabilities are read out.
* <0 Reading out the DPRX capabilities failed.
*/
int intel_dp_lttpr_init(struct intel_dp *intel_dp)
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
int lttpr_count;
bool ret;
int i;
if (intel_dp_is_edp(intel_dp))
return 0;
ret = intel_dp_read_lttpr_common_caps(intel_dp);
/* The DPTX shall read the DPRX caps after LTTPR detection. */
if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
intel_dp_reset_lttpr_common_caps(intel_dp);
return -EIO;
}
if (!ret)
return 0;
/*
* The 0xF0000-0xF02FF range is only valid if the DPCD revision is
* at least 1.4.
*/
if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
intel_dp_reset_lttpr_common_caps(intel_dp);
return 0;
}
lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
/*
* Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
return lttpr_count;
}
EXPORT_SYMBOL(intel_dp_lttpr_init);
EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
static u8 dp_voltage_max(u8 preemph)
{
@ -807,7 +845,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
* TODO: Reiniting LTTPRs here won't be needed once proper connector
* HW state readout is added.
*/
int lttpr_count = intel_dp_lttpr_init(intel_dp);
int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
if (lttpr_count < 0)
return;
if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);


@ -11,7 +11,7 @@
struct intel_crtc_state;
struct intel_dp;
int intel_dp_lttpr_init(struct intel_dp *intel_dp);
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,


@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
{
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
return DSS_CTL1;
return ICL_PIPE_DSS_CTL1(pipe);
return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
}
static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
{
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
return DSS_CTL2;
return ICL_PIPE_DSS_CTL2(pipe);
return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
}
void intel_dsc_enable(struct intel_encoder *encoder,


@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
WRITE_ONCE(fence->vma, NULL);
vma->fence = NULL;
with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
/*
* Skip the write to HW if and only if the device is currently
* suspended.
*
* If the driver does not currently hold a wakeref (if_in_use == 0),
* the device may currently be runtime suspended, or it may be woken
* up before the suspend takes place. If the device is not suspended
* (powered down) and we skip clearing the fence register, the HW is
* left in an undefined state where we may end up with multiple
* registers overlapping.
*/
with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
fence_write(fence);
}


@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
}
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
* __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
* @rpm: the intel_runtime_pm structure
* @ignore_usecount: get a ref even if dev->power.usage_count is 0
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
* and access the HW should intel_runtime_pm_get_if_in_use() report failure.
* already active and ensures that it is powered up. It is illegal to try
* and access the HW should intel_runtime_pm_get_if_active() report failure.
*
* If @ignore_usecount=true, a reference will be acquired even if there is no
* user requiring the device to be powered up (dev->power.usage_count == 0).
* If the function returns false in this case then it's guaranteed that the
* device's runtime suspend hook has been called already or that it will be
* called (and hence it's also guaranteed that the device's runtime resume
* hook will be called eventually).
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
* Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
* as True if the wakeref was acquired, or False otherwise.
*/
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
bool ignore_usecount)
{
if (IS_ENABLED(CONFIG_PM)) {
/*
@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
return 0;
}
@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
return track_intel_runtime_pm_wakeref(rpm);
}
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
return __intel_runtime_pm_get_if_active(rpm, false);
}
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
return __intel_runtime_pm_get_if_active(rpm, true);
}
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @rpm: the intel_runtime_pm structure


@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
#define with_intel_runtime_pm_if_active(rpm, wf) \
for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);


@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
/* Set up the limits management */
if (adreno_is_a530(adreno_gpu))
a530_lm_setup(gpu);
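/* a540_lm_setup() is a540 specific; do not fall through into it on other
 * non-a530 a5xx parts.
 */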
else
else if (adreno_is_a540(adreno_gpu))
a540_lm_setup(gpu);
/* Set up SP/TP power collpase */


@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
else
bit = a6xx_gmu_oob_bits[state].ack_new;
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
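/* "bit" holds a bit position, not a mask; the HOST2GMU interrupt set
 * register expects a mask, hence the 1 << bit below (the clear_oob fix
 * called out in the pull summary).
 */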
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
}
/* Enable CPU control of SPTP power power collapse */


@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
}
static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
/*
* Check that the microcode version is new enough to include several key
* security fixes. Return true if the ucode is safe.
*/
static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
struct drm_gem_object *obj)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
u32 *buf = msm_gem_get_vaddr(obj);
bool ret = false;
if (IS_ERR(buf))
return;
return false;
/*
* If the lowest nibble is 0xa that is an indication that this microcode
* has been patched. The actual version is in dword [3] but we only care
* about the patchlevel which is the lowest nibble of dword [3]
*
* Otherwise check that the firmware is greater than or equal to 1.90
* which was the first version that had this fix built in
* Targets up to a640 (a618, a630 and a640) need to check for a
* microcode version that is patched to support the whereami opcode or
* one that is new enough to include it by default.
*/
if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
a6xx_gpu->has_whereami = true;
else if ((buf[0] & 0xfff) > 0x190)
a6xx_gpu->has_whereami = true;
if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
adreno_is_a640(adreno_gpu)) {
/*
* If the lowest nibble is 0xa that is an indication that this
* microcode has been patched. The actual version is in dword
* [3] but we only care about the patchlevel which is the lowest
* nibble of dword [3]
*
* Otherwise check that the firmware is greater than or equal
* to 1.90 which was the first version that had this fix built
* in
*/
if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
(buf[0] & 0xfff) >= 0x190) {
a6xx_gpu->has_whereami = true;
ret = true;
goto out;
}
DRM_DEV_ERROR(&gpu->pdev->dev,
"a630 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x190);
} else {
/*
* a650 tier targets don't need whereami but still need to be
* equal to or newer than 1.95 for other security fixes
*/
if (adreno_is_a650(adreno_gpu)) {
if ((buf[0] & 0xfff) >= 0x195) {
ret = true;
goto out;
}
DRM_DEV_ERROR(&gpu->pdev->dev,
"a650 SQE ucode is too old. Have version %x need at least %x\n",
buf[0] & 0xfff, 0x195);
}
/*
* When a660 is added those targets should return true here
* since those have all the critical security fixes built in
* from the start
*/
}
out:
msm_gem_put_vaddr(obj);
return ret;
}
static int a6xx_ucode_init(struct msm_gpu *gpu)
@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
}
msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
drm_gem_object_put(a6xx_gpu->sqe_bo);
a6xx_gpu->sqe_bo = NULL;
return -EPERM;
}
}
gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@ -1350,35 +1401,20 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
u32 revn)
{
struct opp_table *opp_table;
struct nvmem_cell *cell;
u32 supp_hw = UINT_MAX;
void *buf;
u16 speedbin;
int ret;
cell = nvmem_cell_get(dev, "speed_bin");
/*
* -ENOENT means that the platform doesn't support speedbin which is
* fine
*/
if (PTR_ERR(cell) == -ENOENT)
return 0;
else if (IS_ERR(cell)) {
ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
if (ret) {
DRM_DEV_ERROR(dev,
"failed to read speed-bin. Some OPPs may not be supported by hardware");
"failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
ret);
goto done;
}
speedbin = le16_to_cpu(speedbin);
buf = nvmem_cell_read(cell, NULL);
if (IS_ERR(buf)) {
nvmem_cell_put(cell);
DRM_DEV_ERROR(dev,
"failed to read speed-bin. Some OPPs may not be supported by hardware");
goto done;
}
supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
kfree(buf);
nvmem_cell_put(cell);
supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
done:
opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);


@ -43,6 +43,8 @@
#define DPU_DEBUGFS_DIR "msm_dpu"
#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
#define MIN_IB_BW 400000000ULL /* Min ib vote 400MB */
static int dpu_kms_hw_init(struct msm_kms *kms);
static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
DPU_DEBUG("REG_DMA is not defined");
}
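/* Parse the interconnect paths before the pm_runtime_get_sync() below:
 * dpu_runtime_resume() now casts a minimum bandwidth vote (MIN_IB_BW) on
 * these paths before enabling the AXI clock, so they have to exist by the
 * time the device is first resumed.
 */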
if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
dpu_kms_parse_data_bus_icc_path(dpu_kms);
pm_runtime_get_sync(&dpu_kms->pdev->dev);
dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_vbif_init_memtypes(dpu_kms);
if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
dpu_kms_parse_data_bus_icc_path(dpu_kms);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
return 0;
@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
ddev = dpu_kms->dev;
WARN_ON(!(dpu_kms->num_paths));
/* Min vote of BW is required before turning on AXI clk */
for (i = 0; i < dpu_kms->num_paths; i++)
icc_set_bw(dpu_kms->path[i], 0,
dpu_kms->catalog->perf.min_dram_ib);
icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
if (rc) {


@ -32,6 +32,8 @@ struct dp_aux_private {
struct drm_dp_aux dp_aux;
};
#define MAX_AUX_RETRIES 5
static const char *dp_aux_get_error(u32 aux_error)
{
switch (aux_error) {
@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
ret = dp_aux_cmd_fifo_tx(aux, msg);
if (ret < 0) {
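/* Count failed native transfers and, after every MAX_AUX_RETRIES of them,
 * ask the catalog layer to reprogram the AUX hardware configuration before
 * the next attempt (the "display aux retry fix" from the pull summary).
 */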
if (aux->native) {
aux->retry_cnt++;
if (!(aux->retry_cnt % MAX_AUX_RETRIES))
dp_catalog_aux_update_cfg(aux->catalog);
}
usleep_range(400, 500); /* at least 400us to next try */
goto unlock_exit;
}


@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
break;
case MSM_DSI_PHY_7NM:
case MSM_DSI_PHY_7NM_V4_1:
pll = msm_dsi_pll_7nm_init(pdev, id);
pll = msm_dsi_pll_7nm_init(pdev, type, id);
break;
default:
pll = ERR_PTR(-ENXIO);


@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
}
#endif
#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id);
#else
static inline struct msm_dsi_pll *
msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
return ERR_PTR(-ENODEV);
}


@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
{
struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
struct dsi_pll_config *config = &pll_7nm->pll_configuration;
void __iomem *base = pll_7nm->mmio;
u64 ref_clk = pll_7nm->vco_ref_clk_rate;
u64 vco_rate = 0x0;
@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
/*
* TODO:
* 1. Assumes prescaler is disabled
* 2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
*/
multiplier = 1 << 18;
multiplier = 1 << config->frac_bits;
pll_freq = dec * (ref_clk * 2);
tmp64 = (ref_clk * 2 * frac);
pll_freq += div_u64(tmp64, multiplier);
@ -852,7 +852,8 @@ err_base_clk_hw:
return ret;
}
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
enum msm_dsi_phy_type type, int id)
{
struct dsi_pll_7nm *pll_7nm;
struct msm_dsi_pll *pll;
@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
pll = &pll_7nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
if (type == MSM_DSI_PHY_7NM_V4_1) {
pll->min_rate = 600000000UL;
pll->max_rate = (unsigned long)5000000000ULL;
/* workaround for max rate overflowing on 32-bit builds: */


@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
{
int crtc_index;
struct drm_crtc *crtc;
for_each_crtc_mask(kms->dev, crtc, crtc_mask)
mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
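/* Taking several commit locks of the same lock class trips lockdep's
 * recursive-locking check, so use the crtc index as the lockdep subclass
 * via mutex_lock_nested(); this replaces the per-lock lock_class_key
 * array removed from msm_kms.h further down.
 */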
for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
crtc_index = drm_crtc_index(crtc);
mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
}
}
static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)


@ -1072,6 +1072,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
static int __maybe_unused msm_pm_prepare(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
if (!priv || !priv->kms)
return 0;
return drm_mode_config_helper_suspend(ddev);
}
@ -1079,6 +1083,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
static void __maybe_unused msm_pm_complete(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
if (!priv || !priv->kms)
return;
drm_mode_config_helper_resume(ddev);
}
@ -1311,6 +1319,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
static void msm_pdev_shutdown(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
if (!priv || !priv->kms)
return;
drm_atomic_helper_shutdown(drm);
}


@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
int ret;
if (fence > fctx->last_fence) {
DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
fctx->name, fence, fctx->last_fence);
return -EINVAL;
}


@ -157,7 +157,6 @@ struct msm_kms {
* from the crtc's pending_timer close to end of the frame:
*/
struct mutex commit_lock[MAX_CRTCS];
struct lock_class_key commit_lock_keys[MAX_CRTCS];
unsigned pending_crtc_mask;
struct msm_pending_timer pending_timers[MAX_CRTCS];
};
@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
{
unsigned i, ret;
for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
lockdep_register_key(&kms->commit_lock_keys[i]);
__mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
&kms->commit_lock_keys[i]);
}
for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
mutex_init(&kms->commit_lock[i]);
kms->funcs = funcs;


@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev)
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
if (disp->disp->object.oclass >= GK104_DISP) {
/* FIXME: 256x256 cursors are supported on Kepler, however unlike Maxwell and later
* generations Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
* proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
* small page allocations in prepare_fb(). When this is implemented, we should also force
* large pages (128K) for ovly fbs in order to fix Kepler ovlys.
* But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
* large pages.
*/
if (disp->disp->object.oclass >= GM107_DISP) {
dev->mode_config.cursor_width = 256;
dev->mode_config.cursor_height = 256;
} else if (disp->disp->object.oclass >= GK104_DISP) {
dev->mode_config.cursor_width = 128;
dev->mode_config.cursor_height = 128;
} else {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;


@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
};
static void rcar_du_encoder_release(struct drm_device *dev, void *res)
{
struct rcar_du_encoder *renc = res;
drm_encoder_cleanup(&renc->base);
kfree(renc);
}
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_bridge *bridge;
int ret;
/*
* Locate the DRM bridge from the DT node. For the DPAD outputs, if the
@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOLINK;
}
renc = kzalloc(sizeof(*renc), GFP_KERNEL);
if (renc == NULL)
return -ENOMEM;
renc->output = output;
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret < 0) {
kfree(renc);
return ret;
}
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
NULL);
if (!renc)
return -ENOMEM;
ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
renc);
if (ret)
return ret;
renc->output = output;
/*
* Attach the bridge to the encoder. The bridge will create the