Merge branch 'drm-next-3.11' of git://people.freedesktop.org/~agd5f/linux into drm-next

A few more DPM fixes based on user testing.

* 'drm-next-3.11' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon/dpm: implement vblank_too_short callback for si
  drm/radeon/dpm: implement vblank_too_short callback for cayman
  drm/radeon/dpm: implement vblank_too_short callback for btc
  drm/radeon/dpm: implement vblank_too_short callback for evergreen
  drm/radeon/dpm: implement vblank_too_short callback for 7xx
  drm/radeon/dpm: add checks against vblank time
  drm/radeon/dpm: add helper to calculate vblank time
  drm/radeon: remove stray line in old pm code
  drm/radeon/dpm: fix display_gap programming on rv7xx
  drm/radeon/dpm: implement force performance level for TN
  drm/radeon/dpm: implement force performance level for ON/LN
  drm/radeon/dpm: implement force performance level for SI
  drm/radeon/dpm: implement force performance level for cayman
  drm/radeon/dpm: implement force performance levels for 7xx/eg/btc
  drm/radeon/dpm: add infrastructure to force performance levels
  drm/radeon: fix surface setup on r1xx
  drm/radeon: add support for 3d perf states on older asics
  drm/radeon: set default clocks for SI when DPM is disabled
Dave Airlie committed 2013-07-09 10:49:39 +10:00
commit 774d8e34e4
23 changed files with 401 additions and 54 deletions

drivers/gpu/drm/radeon/atombios_crtc.c (View file)

@ -1841,6 +1841,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
/* update the hw version for dpm */
radeon_crtc->hw_mode = *adjusted_mode;
return 0;
}

drivers/gpu/drm/radeon/btc_dpm.c (View file)

@ -2059,6 +2059,19 @@ static void btc_init_stutter_mode(struct radeon_device *rdev)
}
}
bool btc_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 100;
if (vblank_time < switch_limit)
return true;
else
return false;
}
static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@ -2068,7 +2081,8 @@ static void btc_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
if (rdev->pm.dpm.new_active_crtc_count > 1)
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
btc_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
else
disable_mclk_switching = false;
@ -2326,9 +2340,9 @@ int btc_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
ret = rv770_unrestrict_performance_levels_after_switch(rdev);
ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
DRM_ERROR("rv770_dpm_force_performance_level failed\n");
return ret;
}

drivers/gpu/drm/radeon/cypress_dpm.c (View file)

@ -2014,9 +2014,9 @@ int cypress_dpm_set_power_state(struct radeon_device *rdev)
if (eg_pi->pcie_performance_request)
cypress_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
ret = rv770_unrestrict_performance_levels_after_switch(rdev);
ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
DRM_ERROR("rv770_dpm_force_performance_level failed\n");
return ret;
}
@ -2174,3 +2174,16 @@ void cypress_dpm_fini(struct radeon_device *rdev)
kfree(rdev->pm.dpm.ps);
kfree(rdev->pm.dpm.priv);
}
bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
if (vblank_time < switch_limit)
return true;
else
return false;
}

drivers/gpu/drm/radeon/evergreen.c (View file)

@ -1504,8 +1504,8 @@ void evergreen_pm_misc(struct radeon_device *rdev)
struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
if (voltage->type == VOLTAGE_SW) {
/* 0xff01 is a flag rather than an actual voltage */
if (voltage->voltage == 0xff01)
/* 0xff0x are flags rather than actual voltages */
if ((voltage->voltage & 0xff00) == 0xff00)
return;
if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
@ -1525,8 +1525,8 @@ void evergreen_pm_misc(struct radeon_device *rdev)
voltage = &rdev->pm.power_state[req_ps_idx].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
/* 0xff01 is a flag rather than an actual voltage */
if (voltage->vddci == 0xff01)
/* 0xff0x are flags rather than actual voltages */
if ((voltage->vddci & 0xff00) == 0xff00)
return;
if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);

drivers/gpu/drm/radeon/ni_dpm.c (View file)

@ -765,6 +765,19 @@ static void ni_calculate_leakage_for_v_and_t(struct radeon_device *rdev,
ni_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
if (vblank_time < switch_limit)
return true;
else
return false;
}
static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@ -775,7 +788,8 @@ static void ni_apply_state_adjust_rules(struct radeon_device *rdev,
u16 vddc, vddci;
int i;
if (rdev->pm.dpm.new_active_crtc_count > 1)
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
else
disable_mclk_switching = false;
@ -1037,13 +1051,37 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
0 : -EINVAL;
}
static int ni_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct ni_ps *ps = ni_get_ps(rps);
u32 levels;
return (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
0 : -EINVAL;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
levels = ps->performance_level_count - 1;
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
}
rdev->pm.dpm.forced_level = level;
return 0;
}
static void ni_stop_smc(struct radeon_device *rdev)
@ -3831,9 +3869,9 @@ int ni_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
ret = ni_unrestrict_performance_levels_after_switch(rdev);
ret = ni_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("ni_unrestrict_performance_levels_after_switch failed\n");
DRM_ERROR("ni_dpm_force_performance_level failed\n");
return ret;
}

drivers/gpu/drm/radeon/ni_dpm.h (View file)

@ -245,4 +245,6 @@ void ni_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps);
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
#endif

drivers/gpu/drm/radeon/ppsmc.h (View file)

@ -71,6 +71,8 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20)
#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40)
#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41)
#define PPSMC_MSG_ForceHigh ((uint8_t)0x42)
#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43)
#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51)
#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52)
#define PPSMC_MSG_EnableCac ((uint8_t)0x53)
@ -101,6 +103,7 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)

drivers/gpu/drm/radeon/r100.c (View file)

@ -3077,6 +3077,10 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
flags |= RADEON_SURF_TILE_COLOR_BOTH;
if (tiling_flags & RADEON_TILING_MACRO)
flags |= RADEON_SURF_TILE_COLOR_MACRO;
/* setting pitch to 0 disables tiling */
if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
== 0)
pitch = 0;
} else if (rdev->family <= CHIP_RV280) {
if (tiling_flags & (RADEON_TILING_MACRO))
flags |= R200_SURF_TILE_COLOR_MACRO;
@ -3094,13 +3098,6 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
if (tiling_flags & RADEON_TILING_SWAP_32BIT)
flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
/* when we aren't tiling, the pitch seems to need to be further divided down - tested on power5 + rn50 server */
if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
if (ASIC_IS_RN50(rdev))
pitch /= 16;
}
/* r100/r200 divide by 16 */
if (rdev->family < CHIP_R300)
flags |= pitch / 16;

drivers/gpu/drm/radeon/r600_dpm.c (View file)

@ -150,6 +150,30 @@ void r600_dpm_print_ps_status(struct radeon_device *rdev,
printk("\n");
}
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
u32 line_time_us, vblank_lines;
u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
radeon_crtc->hw_mode.clock;
vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
radeon_crtc->hw_mode.crtc_vdisplay +
(radeon_crtc->v_border * 2);
vblank_time_us = vblank_lines * line_time_us;
break;
}
}
return vblank_time_us;
}
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
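For a sense of scale, here is a hedged, standalone sketch of the same arithmetic as r600_dpm_get_vblank_time() for a common 1920x1080@60 timing; the mode values below are assumptions chosen for illustration, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed CEA 1080p60 timing: 148500 kHz pixel clock, htotal 2200,
	 * vdisplay 1080, vblank_end 1125, no extra vertical border */
	uint32_t clock_khz = 148500, htotal = 2200;
	uint32_t vdisplay = 1080, vblank_end = 1125, v_border = 0;

	/* same integer math as r600_dpm_get_vblank_time() above */
	uint32_t line_time_us = (htotal * 1000) / clock_khz;            /* 14 us */
	uint32_t vblank_lines = vblank_end - vdisplay + (v_border * 2); /* 45 lines */
	uint32_t vblank_time_us = vblank_lines * line_time_us;          /* 630 us */

	printf("vblank time: %u us\n", vblank_time_us);
	return 0;
}

630 us is comfortably above the 450 us GDDR5 limit used by btc_dpm_vblank_too_short(), so mclk switching would stay enabled for a single display running this mode.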

drivers/gpu/drm/radeon/r600_dpm.h (View file)

@ -129,6 +129,7 @@ void r600_dpm_print_class_info(u32 class, u32 class2);
void r600_dpm_print_cap_info(u32 caps);
void r600_dpm_print_ps_status(struct radeon_device *rdev,
struct radeon_ps *rps);
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
bool r600_is_uvd_state(u32 class, u32 class2);
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u);

drivers/gpu/drm/radeon/radeon.h (View file)

@ -1100,6 +1100,7 @@ enum radeon_pm_state_type {
POWER_STATE_TYPE_INTERNAL_THERMAL,
POWER_STATE_TYPE_INTERNAL_ACPI,
POWER_STATE_TYPE_INTERNAL_ULV,
POWER_STATE_TYPE_INTERNAL_3DPERF,
};
enum radeon_pm_profile_type {
@ -1334,6 +1335,12 @@ enum radeon_pcie_gen {
RADEON_PCIE_GEN_INVALID = 0xffff
};
enum radeon_dpm_forced_level {
RADEON_DPM_FORCED_LEVEL_AUTO = 0,
RADEON_DPM_FORCED_LEVEL_LOW = 1,
RADEON_DPM_FORCED_LEVEL_HIGH = 2,
};
struct radeon_dpm {
struct radeon_ps *ps;
/* number of valid power states */
@ -1373,6 +1380,8 @@ struct radeon_dpm {
bool uvd_active;
/* thermal handling */
struct radeon_dpm_thermal thermal;
/* forced levels */
enum radeon_dpm_forced_level forced_level;
};
void radeon_dpm_enable_power_state(struct radeon_device *rdev,
@ -1668,6 +1677,8 @@ struct radeon_asic {
u32 (*get_mclk)(struct radeon_device *rdev, bool low);
void (*print_power_state)(struct radeon_device *rdev, struct radeon_ps *ps);
void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
} dpm;
/* pageflipping */
struct {
@ -2435,6 +2446,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_get_mclk(rdev, l) rdev->asic->dpm.get_mclk((rdev), (l))
#define radeon_dpm_print_power_state(rdev, ps) rdev->asic->dpm.print_power_state((rdev), (ps))
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
/* Common functions */
/* AGP */
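Both new hooks are optional per ASIC (sumo and trinity, for instance, gain force_performance_level but not vblank_too_short in the radeon_asic.c table below), so callers are expected to NULL-check the function pointer before going through the wrapper macros, as the radeon_pm.c hunks further down do. A hedged sketch of that calling pattern follows; the two example_* functions are made up for illustration and assume this radeon.h is included.

/* hypothetical caller, not part of the patch */
static int example_force_low_if_supported(struct radeon_device *rdev)
{
	if (!rdev->asic->dpm.force_performance_level)
		return 0; /* ASIC has no forced-level support */

	/* goes through the radeon_dpm_force_performance_level() macro above */
	return radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
}

/* hypothetical caller, not part of the patch */
static bool example_can_switch_mclk(struct radeon_device *rdev)
{
	/* treat a missing callback as "vblank is long enough" */
	if (rdev->asic->dpm.vblank_too_short && radeon_dpm_vblank_too_short(rdev))
		return false;
	return true;
}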

drivers/gpu/drm/radeon/radeon_asic.c (View file)

@ -1393,6 +1393,8 @@ static struct radeon_asic rv770_asic = {
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &rv770_dpm_vblank_too_short,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@ -1516,6 +1518,8 @@ static struct radeon_asic evergreen_asic = {
.get_mclk = &rv770_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &cypress_dpm_vblank_too_short,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@ -1639,6 +1643,7 @@ static struct radeon_asic sumo_asic = {
.get_mclk = &sumo_dpm_get_mclk,
.print_power_state = &sumo_dpm_print_power_state,
.debugfs_print_current_performance_level = &sumo_dpm_debugfs_print_current_performance_level,
.force_performance_level = &sumo_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@ -1762,6 +1767,8 @@ static struct radeon_asic btc_asic = {
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@ -1937,6 +1944,8 @@ static struct radeon_asic cayman_asic = {
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
.debugfs_print_current_performance_level = &ni_dpm_debugfs_print_current_performance_level,
.force_performance_level = &ni_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@ -2110,6 +2119,7 @@ static struct radeon_asic trinity_asic = {
.get_mclk = &trinity_dpm_get_mclk,
.print_power_state = &trinity_dpm_print_power_state,
.debugfs_print_current_performance_level = &trinity_dpm_debugfs_print_current_performance_level,
.force_performance_level = &trinity_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@ -2283,6 +2293,8 @@ static struct radeon_asic si_asic = {
.get_mclk = &ni_dpm_get_mclk,
.print_power_state = &ni_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
.vblank_too_short = &ni_dpm_vblank_too_short,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,

drivers/gpu/drm/radeon/radeon_asic.h (View file)

@ -478,6 +478,9 @@ void rv770_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev);
/*
* evergreen
@ -542,6 +545,7 @@ void cypress_dpm_disable(struct radeon_device *rdev);
int cypress_dpm_set_power_state(struct radeon_device *rdev);
void cypress_dpm_display_configuration_changed(struct radeon_device *rdev);
void cypress_dpm_fini(struct radeon_device *rdev);
bool cypress_dpm_vblank_too_short(struct radeon_device *rdev);
int btc_dpm_init(struct radeon_device *rdev);
void btc_dpm_setup_asic(struct radeon_device *rdev);
int btc_dpm_enable(struct radeon_device *rdev);
@ -552,6 +556,7 @@ void btc_dpm_post_set_power_state(struct radeon_device *rdev);
void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
void sumo_dpm_disable(struct radeon_device *rdev);
@ -567,6 +572,8 @@ void sumo_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
/*
* cayman
@ -615,6 +622,9 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
bool ni_dpm_vblank_too_short(struct radeon_device *rdev);
int trinity_dpm_init(struct radeon_device *rdev);
int trinity_dpm_enable(struct radeon_device *rdev);
void trinity_dpm_disable(struct radeon_device *rdev);
@ -630,6 +640,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
@ -679,6 +691,8 @@ void si_dpm_fini(struct radeon_device *rdev);
void si_dpm_display_configuration_changed(struct radeon_device *rdev);
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
int si_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
/* DCE8 - CIK */
void dce8_bandwidth_update(struct radeon_device *rdev);

drivers/gpu/drm/radeon/radeon_atombios.c (View file)

@ -2441,6 +2441,10 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
case ATOM_VIRTUAL_VOLTAGE_ID1:
case ATOM_VIRTUAL_VOLTAGE_ID2:
case ATOM_VIRTUAL_VOLTAGE_ID3:
case ATOM_VIRTUAL_VOLTAGE_ID4:
case ATOM_VIRTUAL_VOLTAGE_ID5:
case ATOM_VIRTUAL_VOLTAGE_ID6:
case ATOM_VIRTUAL_VOLTAGE_ID7:
if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
&vddc) == 0)

drivers/gpu/drm/radeon/radeon_mode.h (View file)

@ -335,6 +335,7 @@ struct radeon_crtc {
u32 line_time;
u32 wm_low;
u32 wm_high;
struct drm_display_mode hw_mode;
};
struct radeon_encoder_primary_dac {

drivers/gpu/drm/radeon/radeon_pm.c (View file)

@ -468,9 +468,57 @@ fail:
return count;
}
static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}
static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
enum radeon_dpm_forced_level level;
int ret = 0;
mutex_lock(&rdev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_HIGH;
} else if (strncmp("auto", buf, strlen("auto")) == 0) {
level = RADEON_DPM_FORCED_LEVEL_AUTO;
} else {
mutex_unlock(&rdev->pm.mutex);
count = -EINVAL;
goto fail;
}
if (rdev->asic->dpm.force_performance_level) {
ret = radeon_dpm_force_performance_level(rdev, level);
if (ret)
count = -EINVAL;
}
mutex_unlock(&rdev->pm.mutex);
fail:
return count;
}
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, radeon_get_dpm_state, radeon_set_dpm_state);
static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
radeon_get_dpm_forced_performance_level,
radeon_set_dpm_forced_performance_level);
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@ -585,12 +633,25 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
int i;
struct radeon_ps *ps;
u32 ui_class;
bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
true : false;
restart_search:
/* check if the vblank period is too short to adjust the mclk */
if (single_display && rdev->asic->dpm.vblank_too_short) {
if (radeon_dpm_vblank_too_short(rdev))
single_display = false;
}
/* certain older asics have a separate 3D performance state,
* so try that first if the user selected performance
*/
if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
/* balanced states don't exist at the moment */
if (dpm_state == POWER_STATE_TYPE_BALANCED)
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
restart_search:
/* Pick the best power state based on current conditions */
for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
ps = &rdev->pm.dpm.ps[i];
@ -600,7 +661,7 @@ restart_search:
case POWER_STATE_TYPE_BATTERY:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (rdev->pm.dpm.new_active_crtc_count < 2)
if (single_display)
return ps;
} else
return ps;
@ -609,7 +670,7 @@ restart_search:
case POWER_STATE_TYPE_BALANCED:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (rdev->pm.dpm.new_active_crtc_count < 2)
if (single_display)
return ps;
} else
return ps;
@ -618,7 +679,7 @@ restart_search:
case POWER_STATE_TYPE_PERFORMANCE:
if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
if (rdev->pm.dpm.new_active_crtc_count < 2)
if (single_display)
return ps;
} else
return ps;
@ -657,6 +718,10 @@ restart_search:
if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
return ps;
break;
case POWER_STATE_TYPE_INTERNAL_3DPERF:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
return ps;
break;
default:
break;
}
@ -675,6 +740,8 @@ restart_search:
dpm_state = POWER_STATE_TYPE_BATTERY;
goto restart_search;
case POWER_STATE_TYPE_BATTERY:
case POWER_STATE_TYPE_BALANCED:
case POWER_STATE_TYPE_INTERNAL_3DPERF:
dpm_state = POWER_STATE_TYPE_PERFORMANCE;
goto restart_search;
default:
@ -852,7 +919,7 @@ static void radeon_pm_resume_old(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
(rdev->family <= CHIP_HAINAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@ -896,7 +963,7 @@ static void radeon_pm_resume_dpm(struct radeon_device *rdev)
if (ret) {
DRM_ERROR("radeon: dpm resume failed\n");
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
(rdev->family <= CHIP_HAINAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@ -947,7 +1014,7 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
(rdev->family <= CHIP_HAINAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@ -1003,8 +1070,8 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
int ret;
/* default to performance state */
rdev->pm.dpm.state = POWER_STATE_TYPE_PERFORMANCE;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE;
rdev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
rdev->pm.default_sclk = rdev->clock.default_sclk;
rdev->pm.default_mclk = rdev->clock.default_mclk;
rdev->pm.current_sclk = rdev->clock.default_sclk;
@ -1032,7 +1099,7 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
if (ret) {
rdev->pm.dpm_enabled = false;
if ((rdev->family >= CHIP_BARTS) &&
(rdev->family <= CHIP_CAYMAN) &&
(rdev->family <= CHIP_HAINAN) &&
rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
@ -1053,6 +1120,9 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
if (rdev->pm.num_power_states > 1) {
ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
if (ret)
DRM_ERROR("failed to create device file for dpm state\n");
ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
if (ret)
DRM_ERROR("failed to create device file for dpm state\n");
/* XXX: these are noops for dpm but are here for backwards compat */
@ -1159,6 +1229,7 @@ static void radeon_pm_fini_dpm(struct radeon_device *rdev)
mutex_unlock(&rdev->pm.mutex);
device_remove_file(rdev->dev, &dev_attr_power_dpm_state);
device_remove_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
/* XXX backwards compat */
device_remove_file(rdev->dev, &dev_attr_power_profile);
device_remove_file(rdev->dev, &dev_attr_power_method);
@ -1188,7 +1259,6 @@ static void radeon_pm_compute_clocks_old(struct radeon_device *rdev)
if (rdev->pm.num_power_states < 2)
return;
INIT_WORK(&rdev->pm.dpm.thermal.work, radeon_dpm_thermal_work_handler);
mutex_lock(&rdev->pm.mutex);
rdev->pm.active_crtcs = 0;
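The new attribute accepts exactly the strings matched by the strncmp() calls above: "low", "high" and "auto"; anything else hits the -EINVAL path. As a rough usage illustration, a userspace program could drive it like the sketch below; the sysfs path is an assumption (the file is created on the GPU's PCI device, which for the first card usually appears under /sys/class/drm/card0/device/), not something spelled out in the patch, and writes require root.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define DPM_LEVEL_ATTR "/sys/class/drm/card0/device/power_dpm_force_performance_level"

/* write "auto", "low" or "high" to the assumed sysfs attribute */
static int set_dpm_level(const char *level)
{
	int fd = open(DPM_LEVEL_ATTR, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, level, strlen(level));
	close(fd);
	return (n < 0) ? -1 : 0;
}

int main(void)
{
	char buf[16] = {0};
	int fd;

	if (set_dpm_level("low"))
		perror("set_dpm_level");

	/* read back what radeon_get_dpm_forced_performance_level() reports */
	fd = open(DPM_LEVEL_ATTR, O_RDONLY);
	if (fd >= 0) {
		if (read(fd, buf, sizeof(buf) - 1) > 0)
			printf("forced level: %s", buf);
		close(fd);
	}
	return 0;
}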

drivers/gpu/drm/radeon/rv770_dpm.c (View file)

@ -1341,10 +1341,10 @@ static void rv770_program_display_gap(struct radeon_device *rdev)
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
if (rdev->pm.dpm.new_active_crtcs & 1) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
} else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
} else if (rdev->pm.dpm.new_active_crtcs & 2) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
} else {
@ -1471,13 +1471,29 @@ int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev)
return 0;
}
int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
PPSMC_Msg msg;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ZeroLevelsDisabled) != PPSMC_Result_OK)
return -EINVAL;
msg = PPSMC_MSG_ForceHigh;
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
return -EINVAL;
msg = (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled);
} else {
if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
return -EINVAL;
msg = (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled);
}
if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
return -EINVAL;
if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled)) != PPSMC_Result_OK)
return -EINVAL;
rdev->pm.dpm.forced_level = level;
return 0;
}
@ -2047,9 +2063,10 @@ int rv770_dpm_set_power_state(struct radeon_device *rdev)
if (pi->dcodt)
rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
ret = rv770_unrestrict_performance_levels_after_switch(rdev);
ret = rv770_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("rv770_unrestrict_performance_levels_after_switch failed\n");
DRM_ERROR("rv770_dpm_force_performance_level failed\n");
return ret;
}
@ -2491,3 +2508,14 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
else
return requested_state->high.mclk;
}
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
if (vblank_time < 300)
return true;
else
return false;
}

drivers/gpu/drm/radeon/rv770_dpm.h (View file)

@ -262,7 +262,8 @@ void rv770_stop_dpm(struct radeon_device *rdev);
void r7xx_stop_smc(struct radeon_device *rdev);
void rv770_reset_smio_status(struct radeon_device *rdev);
int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev);
int rv770_unrestrict_performance_levels_after_switch(struct radeon_device *rdev);
int rv770_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level);
int rv770_halt_smc(struct radeon_device *rdev);
int rv770_resume_smc(struct radeon_device *rdev);
int rv770_set_sw_state(struct radeon_device *rdev);

drivers/gpu/drm/radeon/si_dpm.c (View file)

@ -2906,7 +2906,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u16 vddc, vddci;
int i;
if (rdev->pm.dpm.new_active_crtc_count > 1)
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
else
disable_mclk_switching = false;
@ -3231,16 +3232,38 @@ static int si_restrict_performance_levels_before_switch(struct radeon_device *rd
0 : -EINVAL;
}
#if 0
static int si_unrestrict_performance_levels_after_switch(struct radeon_device *rdev)
int si_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct ni_ps *ps = ni_get_ps(rps);
u32 levels;
return (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) == PPSMC_Result_OK) ?
0 : -EINVAL;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
levels = ps->performance_level_count - 1;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
}
rdev->pm.dpm.forced_level = level;
return 0;
}
#endif
static int si_set_boot_state(struct radeon_device *rdev)
{
@ -5992,11 +6015,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
#if 0
/* XXX */
ret = si_unrestrict_performance_levels_after_switch(rdev);
ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("si_unrestrict_performance_levels_after_switch failed\n");
DRM_ERROR("si_dpm_force_performance_level failed\n");
return ret;
}
#else
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
#endif
return 0;

drivers/gpu/drm/radeon/sumo_dpm.c (View file)

@ -1319,6 +1319,8 @@ int sumo_dpm_set_power_state(struct radeon_device *rdev)
if (pi->enable_dpm)
sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
return 0;
}
@ -1830,3 +1832,45 @@ u32 sumo_dpm_get_mclk(struct radeon_device *rdev, bool low)
return pi->sys_info.bootup_uma_clk;
}
int sumo_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
int i;
if (ps->num_levels <= 1)
return 0;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
sumo_power_level_enable(rdev, ps->num_levels - 1, true);
sumo_set_forced_level(rdev, ps->num_levels - 1);
sumo_set_forced_mode_enabled(rdev);
for (i = 0; i < ps->num_levels - 1; i++) {
sumo_power_level_enable(rdev, i, false);
}
sumo_set_forced_mode(rdev, false);
sumo_set_forced_mode_enabled(rdev);
sumo_set_forced_mode(rdev, false);
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
sumo_power_level_enable(rdev, 0, true);
sumo_set_forced_level(rdev, 0);
sumo_set_forced_mode_enabled(rdev);
for (i = 1; i < ps->num_levels; i++) {
sumo_power_level_enable(rdev, i, false);
}
sumo_set_forced_mode(rdev, false);
sumo_set_forced_mode_enabled(rdev);
sumo_set_forced_mode(rdev, false);
} else {
for (i = 0; i < ps->num_levels; i++) {
sumo_power_level_enable(rdev, i, true);
}
}
rdev->pm.dpm.forced_level = level;
return 0;
}

drivers/gpu/drm/radeon/trinity_dpm.c (View file)

@ -1158,6 +1158,37 @@ static void trinity_setup_nbp_sim(struct radeon_device *rdev,
}
}
int trinity_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
int i, ret;
if (ps->num_levels <= 1)
return 0;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
/* not supported by the hw */
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
ret = trinity_dpm_n_levels_disabled(rdev, ps->num_levels - 1);
if (ret)
return ret;
} else {
for (i = 0; i < ps->num_levels; i++) {
ret = trinity_dpm_n_levels_disabled(rdev, 0);
if (ret)
return ret;
}
}
rdev->pm.dpm.forced_level = level;
return 0;
}
int trinity_dpm_pre_set_power_state(struct radeon_device *rdev)
{
struct trinity_power_info *pi = trinity_get_pi(rdev);
@ -1190,6 +1221,7 @@ int trinity_dpm_set_power_state(struct radeon_device *rdev)
trinity_force_level_0(rdev);
trinity_unforce_levels(rdev);
trinity_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
}
trinity_release_mutex(rdev);

drivers/gpu/drm/radeon/trinity_dpm.h (View file)

@ -121,6 +121,7 @@ struct trinity_power_info {
int trinity_dpm_config(struct radeon_device *rdev, bool enable);
int trinity_uvd_dpm_config(struct radeon_device *rdev);
int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n);
int trinity_dpm_no_forced_level(struct radeon_device *rdev);
int trinity_dce_enable_voltage_adjustment(struct radeon_device *rdev,
bool enable);

drivers/gpu/drm/radeon/trinity_smc.c (View file)

@ -73,6 +73,13 @@ int trinity_dpm_force_state(struct radeon_device *rdev, u32 n)
return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_ForceState);
}
int trinity_dpm_n_levels_disabled(struct radeon_device *rdev, u32 n)
{
WREG32_SMC(SMU_SCRATCH0, n);
return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DPM_N_LevelsDisabled);
}
int trinity_uvd_dpm_config(struct radeon_device *rdev)
{
return trinity_notify_message_to_smu(rdev, PPSMC_MSG_UVD_DPM_Config);