
drm/amdgpu: refine uvd5.0/6.0 code.

1. Delete the redundant CG/PG mask checks.
   The PG mask is used to control powering UVD up or down,
   not starting/stopping it.
   The CG mask is checked where MGCG is actually enabled.
2. No need to start UVD during hw init.
   UVD is started by the ring test, IB test, and encode paths,
   and stopped again when it goes idle.
3. Change the CG/PG sequence in powerplay (sketched below).

Signed-off-by: Rex Zhu <Rex.Zhu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Rex Zhu 2017-01-20 17:46:34 +08:00 committed by Alex Deucher
parent bac601ec00
commit e3e672e631
4 changed files with 30 additions and 42 deletions
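Taken together, the four file diffs below implement the three points of the commit message. Point 3 is the easiest to lose in the interleaved hunks, so here is a minimal, self-contained C model of the new ordering. The *_stub helpers are illustrative stand-ins for the cgs_set_clockgating_state()/cgs_set_powergating_state() calls shown in the cz/smu7 hunks, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the CGS gating calls; not kernel API. */
static void set_clockgating_stub(bool gate)
{
	printf("CG %s\n", gate ? "gate" : "ungate");
}

static void set_powergating_stub(bool gate)
{
	printf("PG %s\n", gate ? "gate" : "ungate");
}

/* New powerplay order: gate CG before PG, ungate PG before CG,
 * so the block is never clock-ungated while still power-gated. */
static void powergate_uvd_model(bool bgate)
{
	if (bgate) {
		set_clockgating_stub(true);   /* stop clocks first */
		set_powergating_stub(true);   /* then gate power */
		/* ... update DPM and power down the block ... */
	} else {
		/* ... power the block back up first ... */
		set_powergating_stub(false);  /* ungate power */
		set_clockgating_stub(false);  /* then restart clocks */
	}
}

int main(void)
{
	powergate_uvd_model(true);   /* gate path */
	powergate_uvd_model(false);  /* ungate path */
	return 0;
}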

drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -152,9 +152,9 @@ static int uvd_v5_0_hw_init(void *handle)
uint32_t tmp;
int r;
r = uvd_v5_0_start(adev);
if (r)
goto done;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -189,11 +189,13 @@ static int uvd_v5_0_hw_init(void *handle)
amdgpu_ring_write(ring, 3);
amdgpu_ring_commit(ring);
done:
if (!r)
DRM_INFO("UVD initialized successfully.\n");
return r;
}
/**
@@ -208,7 +210,9 @@ static int uvd_v5_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
uvd_v5_0_stop(adev);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
ring->ready = false;
return 0;
@@ -310,10 +314,6 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
uvd_v5_0_mc_resume(adev);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v5_0_enable_mgcg(adev, true);
/* disable interupt */
WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -456,6 +456,8 @@ static void uvd_v5_0_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
WREG32(mmUVD_STATUS, 0);
}
/**
@@ -792,9 +794,6 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
@@ -824,9 +823,6 @@ static int uvd_v5_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev);
adev->uvd.is_powergated = true;
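Two of the uvd_v5_0.c hunks above work as a pair: uvd_v5_0_stop() now clears mmUVD_STATUS on its way out, which lets uvd_v5_0_hw_fini() skip the stop entirely when the block is already down (for example because powergating stopped it first). A minimal standalone model of that guard, with a plain variable standing in for the MMIO register (illustrative only, not the kernel's register accessors):

#include <stdint.h>
#include <stdio.h>

static uint32_t uvd_status;          /* models RREG32(mmUVD_STATUS) */

static void uvd_stop_stub(void)
{
	printf("stopping UVD\n");
	uvd_status = 0;              /* uvd_v5_0_stop() now clears STATUS */
}

static void hw_fini_model(void)
{
	if (uvd_status != 0)         /* skip the stop if PG already did it */
		uvd_stop_stub();
}

int main(void)
{
	uvd_status = 1;              /* engine running: fini must stop it */
	hw_fini_model();
	hw_fini_model();             /* already stopped: this is a no-op */
	return 0;
}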

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -155,9 +155,9 @@ static int uvd_v6_0_hw_init(void *handle)
uint32_t tmp;
int r;
r = uvd_v6_0_start(adev);
if (r)
goto done;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
ring->ready = true;
r = amdgpu_ring_test_ring(ring);
@@ -212,7 +212,9 @@ static int uvd_v6_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring = &adev->uvd.ring;
uvd_v6_0_stop(adev);
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
ring->ready = false;
return 0;
@@ -397,9 +399,6 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
lmi_swap_cntl = 0;
mp_swap_cntl = 0;
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
uvd_v6_0_enable_mgcg(adev, true);
uvd_v6_0_mc_resume(adev);
/* disable interupt */
@@ -554,6 +553,8 @@ static void uvd_v6_0_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
WREG32(mmUVD_STATUS, 0);
}
/**
@@ -1018,9 +1019,6 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v6_0_wait_for_idle(handle))
@@ -1049,9 +1047,6 @@ static int uvd_v6_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;
WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);
if (state == AMD_PG_STATE_GATE) {
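The same pattern repeats in uvd_v6_0.c, including point 1 of the commit message: the early returns on adev->cg_flags/adev->pg_flags are dropped from set_clockgating_state()/set_powergating_state(). Per the message, the CG mask is still honored where MGCG is actually programmed, and the PG mask governs the power-up/down decision elsewhere. A standalone sketch of moving a support-flag check from the entry point into the worker routine (all names here are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool mgcg_supported;          /* models adev->cg_flags & ..._UVD_MGCG */

static void enable_mgcg_model(bool enable)
{
	if (!mgcg_supported)         /* the one remaining flag check */
		return;
	printf("MGCG %s\n", enable ? "on" : "off");
}

static void set_clockgating_model(bool gate)
{
	/* no redundant early return here anymore; the worker decides */
	enable_mgcg_model(gate);
}

int main(void)
{
	mgcg_supported = true;
	set_clockgating_model(true);  /* programs MGCG */
	mgcg_supported = false;
	set_clockgating_model(true);  /* silently skipped */
	return 0;
}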

drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c

@@ -161,28 +161,25 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (cz_hwmgr->uvd_power_gated == bgate)
return 0;
cz_hwmgr->uvd_power_gated = bgate;
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
cz_dpm_update_uvd_dpm(hwmgr, true);
cz_dpm_powerdown_uvd(hwmgr);
} else {
cz_dpm_powerup_uvd(hwmgr);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cz_dpm_update_uvd_dpm(hwmgr, false);
}

drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c

@@ -147,22 +147,22 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
data->uvd_power_gated = bgate;
if (bgate) {
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_GATE);
smu7_update_uvd_dpm(hwmgr, true);
smu7_powerdown_uvd(hwmgr);
} else {
smu7_powerup_uvd(hwmgr);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_UVD,
AMD_CG_STATE_UNGATE);
smu7_update_uvd_dpm(hwmgr, false);
}
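Finally, point 2 of the commit message explains why hw_init no longer calls uvd_vX_0_start(): the engine is started on demand (ring test, IB test, encode) and stopped when it goes idle, so init only needs clocks raised and CG ungated. A standalone model of that lifecycle, under the stated assumptions (illustrative names only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool uvd_running;

static void hw_init_model(void)
{
	printf("set clocks, ungate CG\n"); /* no full start here anymore */
}

static void begin_use_model(void)          /* ring test / IB test / encode */
{
	if (!uvd_running) {
		printf("start UVD\n");
		uvd_running = true;
	}
}

static void idle_model(void)
{
	if (uvd_running) {
		printf("stop UVD\n");
		uvd_running = false;
	}
}

int main(void)
{
	hw_init_model();
	begin_use_model();   /* first job starts the engine */
	idle_model();        /* idle work stops it again */
	return 0;
}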