
Merge branch 'drm-next-5.2' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Fixes for powerplay custom profiles
- DC bandwidth cleanups and fixes
- RAS fixes for vega20
- DC atomic resume fix
- Better plane handling in DC
- Freesync improvements
- Misc bug fixes and cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190411031242.3337-1-alexander.deucher@amd.com
Dave Airlie 2019-04-12 14:46:10 +10:00
commit ecc4946f11
63 changed files with 1317 additions and 585 deletions


@ -827,6 +827,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;


@ -2471,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);


@ -31,6 +31,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
@ -668,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
int r;
@ -706,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
for (base = robj->vm_bo; base; base = base->next)
if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
goto out;
}
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);


@ -696,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
adev->virt.ops->get_pp_clk) {
dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
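A unit sketch for the conversions above (the 10 kHz convention is an assumption based on the usual amdgpu getter behavior, not stated in this hunk):
/* The dpm/virt clock getters are assumed to return 10 kHz units, so a
 * getter value of 130000 means 1.3 GHz; the * 10 turns it into the
 * 1300000 kHz value that dev_info reports to userspace.
 */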


@ -58,7 +58,7 @@ struct amdgpu_hpd;
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
@ -406,7 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
u64 last_flip_vblank;
u32 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;


@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
if (amdgpu_sriov_vf(adev)) {
if (amdgim_is_hwperf(adev) &&
adev->virt.ops->force_dpm_level) {
mutex_lock(&adev->pm.mutex);
adev->virt.ops->force_dpm_level(adev, level);
mutex_unlock(&adev->pm.mutex);
return count;
} else {
return -EINVAL;
}
}
if (current_level == level)
return count;
@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
adev->virt.ops->get_pp_clk)
return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
if (is_support_sw_smu(adev))
return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)


@ -187,13 +187,13 @@ static int psp_tmr_init(struct psp_context *psp)
int ret;
/*
* Allocate 3M memory aligned to 1M from Frame Buffer (local
* physical).
* According to the HW engineers, the TMR address should be "naturally
* aligned", i.e. the start address should be an integer multiple of the
* TMR size.
*
* Note: this memory needs to stay reserved until the driver
* uninitializes.
*/
ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
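To make the alignment change concrete, a small sketch (the 3M figure comes from the comment above; PSP_TMR_SIZE's exact value is not shown in this hunk):
/* "Naturally aligned" means start_addr % PSP_TMR_SIZE == 0. With a 3M
 * TMR, 0x300000, 0x600000, 0x900000, ... are valid start addresses,
 * whereas the old 1M alignment also allowed e.g. 0x100000.
 */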


@ -530,6 +530,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
return 0;
}
/* Only used in device probe stage and called only once. */
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ret;
if (!con)
return -EINVAL;
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
/* If ras is enabled by the vbios, we set up the ras object first in
* both cases. For enable, that is all we need to do. For
* disable, we additionally need to issue a ras TA disable cmd after that.
*/
ret = __amdgpu_ras_feature_enable(adev, head, 1);
if (ret)
return ret;
if (!enable)
ret = amdgpu_ras_feature_enable(adev, head, 0);
} else
ret = amdgpu_ras_feature_enable(adev, head, enable);
return ret;
}
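A summary of the command flow the function above implements:
/* Resulting flow (sketch):
 *   INIT_BY_VBIOS set,   enable=1: set up the ras obj only (fw enabled it)
 *   INIT_BY_VBIOS set,   enable=0: set up the ras obj, then send TA disable
 *   INIT_BY_VBIOS unset:           plain amdgpu_ras_feature_enable()
 */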
static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
bool bypass)
{
@ -558,11 +585,13 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
int i;
const enum amdgpu_ras_error_type default_ras_type =
AMDGPU_RAS_ERROR__NONE;
for (i = 0; i < ras_block_count; i++) {
struct ras_common_if head = {
.block = i,
.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
.type = default_ras_type,
.sub_block_index = 0,
};
strcpy(head.name, ras_block_str(i));
@ -1368,9 +1397,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
amdgpu_ras_enable_all_features(adev, 1);
if (amdgpu_ras_fs_init(adev))
goto fs_out;
@ -1398,18 +1424,25 @@ void amdgpu_ras_post_init(struct amdgpu_device *adev)
if (!con)
return;
/* We enable ras on all hw_supported blocks, but the boot parameter
* might disable some of them, and one or more IPs may not have
* implemented it yet. So we disable those on their behalf.
*/
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
/* Set up all other IPs, which are not implemented yet. There is a
* tricky point here: an IP's actual ras error type should be
* MULTI_UNCORRECTABLE, but as the driver does not handle it,
* ERROR_NONE makes sense anyway.
*/
amdgpu_ras_enable_all_features(adev, 1);
/* We enable ras on all hw_supported blocks, but the boot
* parameter might disable some of them, and one or more IPs may
* not have implemented it yet. So we disable those on their behalf.
*/
list_for_each_entry_safe(obj, tmp, &con->head, node) {
if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
amdgpu_ras_feature_enable(adev, &obj->head, 0);
/* There should not be any references left. */
WARN_ON(alive_obj(obj));
}
};
}
}
}


@ -262,6 +262,9 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable);
int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head);


@ -28,11 +28,8 @@
#define AMDGPU_MAX_SDMA_INSTANCES 2
enum amdgpu_sdma_irq {
AMDGPU_SDMA_IRQ_TRAP0 = 0,
AMDGPU_SDMA_IRQ_TRAP1,
AMDGPU_SDMA_IRQ_ECC0,
AMDGPU_SDMA_IRQ_ECC1,
AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_SDMA_IRQ_LAST
};


@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
static uint32_t parse_clk(char *buf, bool min)
{
char *ptr = buf;
uint32_t clk = 0;
do {
ptr = strchr(ptr, ':');
if (!ptr)
break;
ptr += 2;
clk = simple_strtoul(ptr, NULL, 10);
} while (!min);
return clk * 100;
}
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
{
char *buf = NULL;
uint32_t clk = 0;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
clk = parse_clk(buf, lowest);
kfree(buf);
return clk;
}
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
{
char *buf = NULL;
uint32_t clk = 0;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
clk = parse_clk(buf, lowest);
kfree(buf);
return clk;
}
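For context, the buffer filled in by get_pp_clk uses the same one-level-per-line format as the pp_dpm_sclk/pp_dpm_mclk sysfs files; the sample values below are illustrative, not from the source:
/* Illustrative input for parse_clk():
 *   "0: 300Mhz\n1: 600Mhz *\n2: 900Mhz\n"
 * parse_clk(buf, true)  stops after the first ':'            -> 300 * 100
 * parse_clk(buf, false) walks every ':' and keeps the last   -> 900 * 100
 * The * 100 converts MHz into 10 kHz clock units.
 */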


@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
/* HW PERF SIM in GIM */
AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
/* protect DPM events to GIM */
struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
#define amdgim_is_hwperf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif


@ -769,14 +769,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
addr = 0;
if (ats_entries) {
uint64_t ats_value;
uint64_t value = 0, flags;
ats_value = AMDGPU_PTE_DEFAULT_ATC;
if (level != AMDGPU_VM_PTB)
ats_value |= AMDGPU_PDE_PTE;
flags = AMDGPU_PTE_DEFAULT_ATC;
if (level != AMDGPU_VM_PTB) {
/* Handle leaf PDEs as PTEs */
flags |= AMDGPU_PDE_PTE;
amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
}
r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
0, ats_value);
value, flags);
if (r)
return r;
@ -784,15 +787,22 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
}
if (entries) {
uint64_t value = 0;
uint64_t value = 0, flags = 0;
/* Workaround for fault priority problem on GMC9 */
if (level == AMDGPU_VM_PTB &&
adev->asic_type >= CHIP_VEGA10)
value = AMDGPU_PTE_EXECUTABLE;
if (adev->asic_type >= CHIP_VEGA10) {
if (level != AMDGPU_VM_PTB) {
/* Handle leaf PDEs as PTEs */
flags |= AMDGPU_PDE_PTE;
amdgpu_gmc_get_vm_pde(adev, level,
&value, &flags);
} else {
/* Workaround for fault priority problem on GMC9 */
flags = AMDGPU_PTE_EXECUTABLE;
}
}
r = vm->update_funcs->update(&params, bo, addr, 0, entries,
0, value);
value, flags);
if (r)
return r;
}
@ -2027,7 +2037,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) {
if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
bo_va->is_xgmi = true;
mutex_lock(&adev->vm_manager.lock_pstate);
/* Power up XGMI if it can be potentially used */


@ -303,6 +303,7 @@ struct amdgpu_vm_manager {
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rqs;
struct amdgpu_ring *page_fault;
/* partial resident texture handling */
spinlock_t prt_lock;


@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_TRAP0:
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
case AMDGPU_SDMA_IRQ_TRAP1:
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);


@ -3568,7 +3568,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
int r;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
amdgpu_ras_feature_enable(adev, &ras_block, 0);
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
return 0;
}
@ -3581,7 +3581,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable(adev, *ras_if, 1);
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
if (r)
goto feature;
@ -4840,10 +4840,16 @@ static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct ras_common_if *ras_if = adev->gfx.ras_if;
struct ras_dispatch_if ih_data = {
.head = *adev->gfx.ras_if,
.entry = entry,
};
if (!ras_if)
return 0;
ih_data.head = *ras_if;
DRM_ERROR("CP ECC ERROR IRQ\n");
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;


@ -248,10 +248,16 @@ static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct ras_common_if *ras_if = adev->gmc.ras_if;
struct ras_dispatch_if ih_data = {
.head = *adev->gmc.ras_if,
.entry = entry,
};
if (!ras_if)
return 0;
ih_data.head = *ras_if;
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@ -676,7 +682,7 @@ static int gmc_v9_0_ecc_late_init(void *handle)
int r;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
amdgpu_ras_feature_enable(adev, &ras_block, 0);
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
return 0;
}
/* handle resume path. */
@ -689,7 +695,7 @@ static int gmc_v9_0_ecc_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable(adev, *ras_if, 1);
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
if (r)
goto feature;


@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
int r = 0;
u32 req, val, size;
if (!amdgim_is_hwperf(adev) || buf == NULL)
return -EBADRQC;
switch (type) {
case PP_SCLK:
req = IDH_IRQ_GET_PP_SCLK;
break;
case PP_MCLK:
req = IDH_IRQ_GET_PP_MCLK;
break;
default:
return -EBADRQC;
}
mutex_lock(&adev->virt.dpm_mutex);
xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
if (!r && adev->fw_vram_usage.va != NULL) {
val = RREG32_NO_KIQ(
SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
val), PAGE_SIZE);
if (size < PAGE_SIZE)
strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
else
size = 0;
r = size;
goto out;
}
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
if (r)
pr_info("%s DPM request failed",
(type == PP_SCLK) ? "SCLK" : "MCLK");
out:
mutex_unlock(&adev->virt.dpm_mutex);
return r;
}
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
int r = 0;
u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
if (!amdgim_is_hwperf(adev))
return -EBADRQC;
mutex_lock(&adev->virt.dpm_mutex);
xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
if (!r)
goto out;
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
if (!r)
pr_info("DPM request failed");
else
pr_info("Mailbox is broken");
out:
mutex_unlock(&adev->virt.dpm_mutex);
return r;
}
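Taken together, the two helpers above implement the following guest-to-host handshake (a sketch of the flow shown in this hunk):
/* 1. Take dpm_mutex and send the IDH_IRQ_* request through the mailbox.
 * 2. On IDH_SUCCESS for a clock query, MAILBOX_MSGBUF_RCV_DW1 holds a byte
 *    offset into the pf2vf reserve region where the host wrote the
 *    clock-table string, which is then copied into the caller's buffer.
 * 3. Otherwise poll for IDH_FAIL and log which DPM request failed.
 */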
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level,
};


@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
IDH_IRQ_FORCE_DPM_LEVEL = 10,
IDH_IRQ_GET_PP_SCLK,
IDH_IRQ_GET_PP_MCLK,
IDH_LOG_VF_ERROR = 200,
};
@ -43,6 +47,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
IDH_SUCCESS,
IDH_FAIL,
IDH_EVENT_MAX
};


@ -674,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}


@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_TRAP0:
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
case AMDGPU_SDMA_IRQ_TRAP1:
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);


@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_TRAP0:
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
case AMDGPU_SDMA_IRQ_TRAP1:
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);


@ -1518,7 +1518,7 @@ static int sdma_v4_0_late_init(void *handle)
int r;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
amdgpu_ras_feature_enable(adev, &ras_block, 0);
amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
return 0;
}
@ -1532,7 +1532,7 @@ static int sdma_v4_0_late_init(void *handle)
**ras_if = ras_block;
r = amdgpu_ras_feature_enable(adev, *ras_if, 1);
r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
if (r)
goto feature;
@ -1551,13 +1551,13 @@ static int sdma_v4_0_late_init(void *handle)
if (r)
goto sysfs;
resume:
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0);
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
if (r)
goto irq;
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC1);
r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
if (r) {
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0);
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
goto irq;
}
@ -1621,8 +1621,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
@ -1641,8 +1641,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@ -1709,8 +1709,8 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC0);
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_ECC1);
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
@ -1780,13 +1780,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@ -1866,10 +1865,16 @@ static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
struct ras_common_if *ras_if = adev->sdma.ras_if;
struct ras_dispatch_if ih_data = {
.head = *adev->sdma.ras_if,
.entry = entry,
};
if (!ras_if)
return 0;
ih_data.head = *ras_if;
amdgpu_ras_interrupt_dispatch(adev, &ih_data);
return 0;
}
@ -1908,7 +1913,7 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
{
u32 sdma_edc_config;
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_ECC0) ?
u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
@ -2292,6 +2297,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
}
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
} else {
for (i = 0; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].ring.sched;


@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_TRAP0 :
AMDGPU_SDMA_IRQ_TRAP1);
AMDGPU_SDMA_IRQ_INSTANCE0 :
AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
case AMDGPU_SDMA_IRQ_TRAP0:
case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
case AMDGPU_SDMA_IRQ_TRAP1:
case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);


@ -111,7 +111,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs);
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
@ -251,12 +252,22 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
struct dm_crtc_state *acrtc_state;
uint32_t vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@ -279,26 +290,98 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
/* Update to correct count(s) if racing with vblank irq */
amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* page flip completed. */
e = amdgpu_crtc->event;
amdgpu_crtc->event = NULL;
/* wake up userspace */
if (amdgpu_crtc->event) {
drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
/* page flip completed. clean up */
amdgpu_crtc->event = NULL;
} else
if (!e)
WARN_ON(1);
acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
vrr_active = amdgpu_dm_vrr_active(acrtc_state);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
!dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
&v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) {
/* Update to the correct count and vblank timestamp if racing with
* the vblank irq. This also updates to the correct vblank timestamp
* even in VRR mode, as the scanout is past the front-porch at this
* point.
*/
drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* Wake up userspace by sending the pageflip event with proper
* count and timestamp of vblank of flip completion.
*/
if (e) {
drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
/* Event sent, so done with vblank for this flip */
drm_crtc_vblank_put(&amdgpu_crtc->base);
}
} else if (e) {
/* VRR active and inside front-porch: vblank count and
* timestamp for pageflip event will only be up to date after
* drm_crtc_handle_vblank() has been executed from late vblank
* irq handler after start of back-porch (vline 0). We queue the
* pageflip event for send-out by drm_crtc_handle_vblank() with
* updated timestamp and count, once it runs after us.
*
* We need to open-code this instead of using the helper
* drm_crtc_arm_vblank_event(), as that helper would
* call drm_crtc_accurate_vblank_count(), which we must
* not call in VRR mode while we are in front-porch!
*/
/* sequence will be replaced by real count during send-out. */
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e->pipe = amdgpu_crtc->crtc_id;
list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
e = NULL;
}
/* Keep track of the vblank of this flip for flip throttling. We use
* the cooked hw counter, as that one gets incremented at the start of
* the vblank in which the pageflip completed, so last_flip_vblank is
* the forbidden count for queueing new pageflips if vsync + VRR is
* enabled.
*/
amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
amdgpu_crtc->crtc_id);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
__func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
amdgpu_crtc->crtc_id, amdgpu_crtc,
vrr_active, (int) !e);
}
drm_crtc_vblank_put(&amdgpu_crtc->base);
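In short, the reworked pageflip handler above distinguishes two cases:
/* Flow sketch:
 * - fixed refresh, or VRR with the scanout already past the front-porch:
 *   update the vblank count/timestamp and send the pageflip event now;
 * - VRR and still inside the front-porch: queue the event on
 *   vblank_event_list; drm_crtc_handle_vblank(), run from the vupdate irq
 *   after the front-porch ends, sends it with a then-valid timestamp.
 */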
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after the end of the front-porch
* in vrr mode, as vblank timestamping only gives valid results
* once we are past the front-porch. This will also deliver any
* page-flip completion events that have been queued to us
* if a pageflip happened inside the front-porch.
*/
if (amdgpu_dm_vrr_active(acrtc_state))
drm_crtc_handle_vblank(&acrtc->base);
}
}
static void dm_crtc_high_irq(void *interrupt_params)
@ -311,11 +394,24 @@ static void dm_crtc_high_irq(void *interrupt_params)
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
drm_crtc_handle_vblank(&acrtc->base);
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at the start of the front-porch is only
* possible in non-vrr mode, as only there does vblank timestamping
* give valid results while inside the front-porch. Otherwise defer
* it to dm_vupdate_high_irq after the end of the front-porch.
*/
if (!amdgpu_dm_vrr_active(acrtc_state))
drm_crtc_handle_vblank(&acrtc->base);
/* The following must happen at the start of vblank, for crc
* computation and below-the-range (btr) support in vrr mode.
*/
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
if (acrtc_state->stream &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
@ -888,9 +984,16 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
int i;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@ -1433,6 +1536,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
/* Use VUPDATE interrupt */
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
DRM_ERROR("Failed to add vupdate irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
@ -1518,6 +1642,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
* to trigger at end of each vblank, regardless of state of the lock,
* matching DCE behaviour.
*/
for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
DRM_ERROR("Failed to add vupdate irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
@ -1629,17 +1781,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
new_state->context = dc_create_state();
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
new_state->context = dc_copy_state(old_state->context);
if (!new_state->context) {
kfree(new_state);
return NULL;
}
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
dc_resource_state_copy_construct(old_state->context,
new_state->context);
return &new_state->base;
}
@ -1683,7 +1834,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
if (!state)
return -ENOMEM;
state->context = dc_create_state();
state->context = dc_create_state(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
@ -1811,7 +1962,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info, int plane_id,
enum drm_plane_type plane_type)
enum drm_plane_type plane_type,
const struct dc_plane_cap *plane_cap)
{
struct drm_plane *plane;
unsigned long possible_crtcs;
@ -1834,7 +1986,7 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
if (plane_id >= dm->dc->caps.max_streams)
possible_crtcs = 0xff;
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs);
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
DRM_ERROR("KMS: Failed to initialize plane\n");
@ -1887,8 +2039,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t overlay_planes, primary_planes;
int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@ -1896,24 +2049,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -EINVAL;
}
/*
* Determine the number of overlay planes supported.
* Only support DCN for now, and cap so we don't encourage
* userspace to use up all the planes.
*/
overlay_planes = 0;
for (i = 0; i < dm->dc->caps.max_planes; ++i) {
struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
if (plane->type == DC_PLANE_TYPE_DCN_UNIVERSAL &&
plane->blends_with_above && plane->blends_with_below &&
plane->supports_argb8888)
overlay_planes += 1;
}
overlay_planes = min(overlay_planes, 1);
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
@ -1923,8 +2058,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
* Order is reversed to match iteration order in atomic check.
*/
for (i = (primary_planes - 1); i >= 0; i--) {
plane = &dm->dc->caps.planes[i];
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY)) {
DRM_PLANE_TYPE_PRIMARY, plane)) {
DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
@ -1935,13 +2072,30 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
* These planes have a higher DRM index than the primary planes since
* they should be considered as having a higher z-order.
* Order is reversed to match iteration order in atomic check.
*
* Only support DCN for now, and only expose one so we don't encourage
* userspace to use up all the pipes.
*/
for (i = (overlay_planes - 1); i >= 0; i--) {
for (i = 0; i < dm->dc->caps.max_planes; ++i) {
struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
if (!plane->blends_with_above || !plane->blends_with_below)
continue;
if (!plane->supports_argb8888)
continue;
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY)) {
DRM_PLANE_TYPE_OVERLAY, plane)) {
DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
/* Only create one overlay plane. */
break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@ -2601,6 +2755,50 @@ fill_blending_from_plane_state(struct drm_plane_state *plane_state,
}
}
static int
fill_plane_color_attributes(const struct drm_plane_state *plane_state,
const struct dc_plane_state *dc_plane_state,
enum dc_color_space *color_space)
{
bool full_range;
*color_space = COLOR_SPACE_SRGB;
/* DRM color properties only affect non-RGB formats. */
if (dc_plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
switch (plane_state->color_encoding) {
case DRM_COLOR_YCBCR_BT601:
if (full_range)
*color_space = COLOR_SPACE_YCBCR601;
else
*color_space = COLOR_SPACE_YCBCR601_LIMITED;
break;
case DRM_COLOR_YCBCR_BT709:
if (full_range)
*color_space = COLOR_SPACE_YCBCR709;
else
*color_space = COLOR_SPACE_YCBCR709_LIMITED;
break;
case DRM_COLOR_YCBCR_BT2020:
if (full_range)
*color_space = COLOR_SPACE_2020_YCBCR;
else
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
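The mapping the function above implements, summarized:
/* DRM plane state -> DC color space (YCbCr formats only):
 *   BT601  + full range    -> COLOR_SPACE_YCBCR601
 *   BT601  + limited range -> COLOR_SPACE_YCBCR601_LIMITED
 *   BT709  + full range    -> COLOR_SPACE_YCBCR709
 *   BT709  + limited range -> COLOR_SPACE_YCBCR709_LIMITED
 *   BT2020 + full range    -> COLOR_SPACE_2020_YCBCR
 *   BT2020 + limited range -> -EINVAL (no matching DC enum here)
 * RGB formats always resolve to COLOR_SPACE_SRGB.
 */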
static int fill_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_state *dc_plane_state,
struct drm_plane_state *plane_state,
@ -2622,6 +2820,11 @@ static int fill_plane_attributes(struct amdgpu_device *adev,
if (ret)
return ret;
ret = fill_plane_color_attributes(plane_state, dc_plane_state,
&dc_plane_state->color_space);
if (ret)
return ret;
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@ -3210,12 +3413,41 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
acrtc->crtc_id, enable ? "en" : "dis", rc);
return rc;
}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
int rc = 0;
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_vrr_active(acrtc_state))
rc = dm_set_vupdate_irq(crtc, true);
} else {
/* vblank irq off -> vupdate irq off */
rc = dm_set_vupdate_irq(crtc, false);
}
if (rc)
return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@ -3891,7 +4123,8 @@ static const u32 cursor_formats[] = {
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs)
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap)
{
int res = -EPERM;
@ -3928,8 +4161,8 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
break;
}
/* TODO: Check DC plane caps explicitly here for adding propertes */
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
plane_cap && plane_cap->per_pixel_alpha) {
unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI);
@ -3961,7 +4194,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
goto fail;
cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
if (!acrtc)
@ -4646,7 +4879,6 @@ static void update_freesync_state_on_stream(
{
struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
struct dc_info_packet vrr_infopacket = {0};
struct mod_freesync_config config = new_crtc_state->freesync_config;
if (!new_stream)
return;
@ -4659,20 +4891,6 @@ static void update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
if (new_crtc_state->vrr_supported &&
config.min_refresh_in_uhz &&
config.max_refresh_in_uhz) {
config.state = new_crtc_state->base.vrr_enabled ?
VRR_STATE_ACTIVE_VARIABLE :
VRR_STATE_INACTIVE;
} else {
config.state = VRR_STATE_UNSUPPORTED;
}
mod_freesync_build_vrr_params(dm->freesync_module,
new_stream,
&config, &vrr_params);
if (surface) {
mod_freesync_handle_preflip(
dm->freesync_module,
@ -4713,6 +4931,76 @@ static void update_freesync_state_on_stream(
(int)vrr_params.state);
}
static void pre_update_freesync_state_on_stream(
struct amdgpu_display_manager *dm,
struct dm_crtc_state *new_crtc_state)
{
struct dc_stream_state *new_stream = new_crtc_state->stream;
struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
struct mod_freesync_config config = new_crtc_state->freesync_config;
if (!new_stream)
return;
/*
* TODO: Determine why min/max totals and vrefresh can be 0 here.
* For now it's sufficient to just guard against these conditions.
*/
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
if (new_crtc_state->vrr_supported &&
config.min_refresh_in_uhz &&
config.max_refresh_in_uhz) {
config.state = new_crtc_state->base.vrr_enabled ?
VRR_STATE_ACTIVE_VARIABLE :
VRR_STATE_INACTIVE;
} else {
config.state = VRR_STATE_UNSUPPORTED;
}
mod_freesync_build_vrr_params(dm->freesync_module,
new_stream,
&config, &vrr_params);
new_crtc_state->freesync_timing_changed |=
(memcmp(&new_crtc_state->vrr_params.adjust,
&vrr_params.adjust,
sizeof(vrr_params.adjust)) != 0);
new_crtc_state->vrr_params = vrr_params;
}
static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
struct dm_crtc_state *new_state)
{
bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
if (!old_vrr_active && new_vrr_active) {
/* Transition VRR inactive -> active:
* While VRR is active, we must not disable the vblank irq, as a
* reenable after a disable would compute bogus vblank/pflip
* timestamps if the reenable happened inside the display front-porch.
*
* We also need vupdate irq for the actual core vblank handling
* at end of vblank.
*/
dm_set_vupdate_irq(new_state->base.crtc, true);
drm_crtc_vblank_get(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
__func__, new_state->base.crtc->base.id);
} else if (old_vrr_active && !new_vrr_active) {
/* Transition VRR active -> inactive:
* Allow vblank irq disable again for fixed refresh rate.
*/
dm_set_vupdate_irq(new_state->base.crtc, false);
drm_crtc_vblank_put(new_state->base.crtc);
DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
__func__, new_state->base.crtc->base.id);
}
}
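The reference counting above in a nutshell:
/* VRR off -> on: enable the vupdate irq and take a vblank reference
 *   (drm_crtc_vblank_get), so the vblank irq can never be disabled
 *   while VRR is active.
 * VRR on -> off: disable the vupdate irq and drop the reference
 *   (drm_crtc_vblank_put), allowing vblank irq disable again.
 */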
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct drm_device *dev,
@ -4734,11 +5022,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
uint32_t target, target_vblank;
uint64_t last_flip_vblank;
bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@ -4790,8 +5076,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
bundle->surface_updates[planes_count].scaling_info = &bundle->scaling_infos[planes_count];
fill_plane_color_attributes(
new_plane_state, dc_plane,
&bundle->plane_infos[planes_count].color_space);
bundle->plane_infos[planes_count].color_space = dc_plane->color_space;
bundle->plane_infos[planes_count].format = dc_plane->format;
bundle->plane_infos[planes_count].plane_size = dc_plane->plane_size;
bundle->plane_infos[planes_count].rotation = dc_plane->rotation;
@ -4880,7 +5168,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = drm_crtc_vblank_count(pcrtc);
last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
}
else {
/* For variable refresh rate mode only:
@ -4896,11 +5184,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
target = (uint32_t)last_flip_vblank + wait_for_vblank;
/* Prepare wait for target vblank early - before the fence-waits */
target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
target_vblank = last_flip_vblank + wait_for_vblank;
/*
* Wait until we're out of the vertical blank period before the one
@ -5065,7 +5349,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_state = dm_state->context;
} else {
/* No state changes, retain current state. */
dc_state_temp = dc_create_state();
dc_state_temp = dc_create_state(dm->dc);
ASSERT(dc_state_temp);
dc_state = dc_state_temp;
dc_resource_state_copy_construct_current(dm->dc, dc_state);
@ -5234,6 +5518,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
/* Update freesync state before amdgpu_dm_handle_vrr_transition(). */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
/*
@ -5247,6 +5537,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
/* Handle vrr on->off / off->on transitions */
amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
dm_new_crtc_state);
modeset_needed = modeset_required(
new_crtc_state,
dm_new_crtc_state->stream,
@ -5891,7 +6186,9 @@ dm_determine_update_type_for_commit(struct dc *dc,
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dc_stream_update stream_update = { 0 };
struct dc_stream_update stream_update;
memset(&stream_update, 0, sizeof(stream_update));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);


@ -182,6 +182,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
/**
* @vupdate_params:
*
* Vertical update IRQ parameters, passed to registered handlers when
* triggered.
*/
struct common_irq_params
vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;


@ -995,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_target_backlight_pwm", &target_backlight_read},
};
/*
* Sets the DC visual confirm debug option from the given string.
* Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
*/
static int visual_confirm_set(void *data, u64 val)
{
struct amdgpu_device *adev = data;
adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
return 0;
}
/*
* Reads the DC visual confirm debug option value into the given buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
*/
static int visual_confirm_get(void *data, u64 *val)
{
struct amdgpu_device *adev = data;
*val = adev->dm.dc->debug.visual_confirm;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
visual_confirm_set, "%llu\n");
int dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {
@ -1020,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
adev,
&dtn_log_fops);
return PTR_ERR_OR_ZERO(ent);
if (IS_ERR(ent))
return PTR_ERR(ent);
ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
adev, &visual_confirm_fops);
if (IS_ERR(ent))
return PTR_ERR(ent);
return 0;
}


@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
adev,
source,
crtc_id,
state,
IRQ_TYPE_VUPDATE,
__func__);
}
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
};
static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
.set = amdgpu_dm_set_vupdate_irq_state,
.process = amdgpu_dm_irq_handler,
};
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,
@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;


@ -544,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
@ -579,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
@ -599,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
@ -1008,8 +1008,8 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
@ -1035,54 +1035,54 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
if (context->bw.dcn.clk.dispclk_khz <
if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
context->bw.dcn.clk.dispclk_khz =
context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
context->bw.dcn.clk.max_supported_dppclk_khz =
context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
@ -1181,9 +1181,9 @@ bool dcn_validate_bandwidth(
if (v->voltage_level == 0) {
dc->dml.soc.sr_enter_plus_exit_time_us =
context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*


@ -584,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes/sec per lane */
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
return link_bw_kbps;
}
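/* Worked example (illustrative, assuming the DPCD-style rate encoding in
 * which LINK_RATE_HIGH2 = 0x14): link_rate is in units of
 * LINK_RATE_REF_FREQ_IN_KHZ = 27000, i.e. the per-lane byte rate after
 * 8b/10b coding, so for HBR2 on 4 lanes:
 *
 *   20 * 27000  = 540000   kbytes/sec per lane
 *   * 8         = 4320000  kbps per lane
 *   * 4 lanes   = 17280000 kbps (~17.28 Gbps of payload bandwidth)
 */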
const struct dc_link_settings *dc_link_get_verified_link_cap(
const struct dc_link *link)
{
if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
return &link->preferred_link_setting;
return &link->verified_link_cap;
}
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@ -632,6 +654,8 @@ static bool construct(struct dc *dc,
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
dc->config = init_params->flags;
memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
@ -681,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;
dc->current_state = dc_create_state();
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
/* Create logger */
dc_ctx->dce_environment = init_params->dce_environment;
@ -739,6 +756,18 @@ static bool construct(struct dc *dc,
if (!dc->res_pool)
goto fail;
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
dc->current_state = dc_create_state(dc);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
dc_resource_state_construct(dc, dc->current_state);
if (!create_links(dc, init_params->num_virtual_links))
@ -755,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
struct dc_state *dangling_context = dc_create_state();
struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
if (dangling_context == NULL)
@ -820,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
dc->config = init_params->flags;
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@ -1213,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
struct dc_state *dc_create_state(void)
struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!context)
return NULL;
/* Each context must have its own instance of VBA, and in order to
 * initialize and obtain IP and SOC, the base DML instance from DC is
 * initially copied into every context.
 */
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
#endif
kref_init(&context->refcount);
return context;
}
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
if (cur_pipe->top_pipe)
cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
if (cur_pipe->bottom_pipe)
cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
}
for (i = 0; i < new_ctx->stream_count; i++) {
dc_stream_retain(new_ctx->streams[i]);
for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
dc_plane_state_retain(
new_ctx->stream_status[i].plane_states[j]);
}
kref_init(&new_ctx->refcount);
return new_ctx;
}
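/* Note (illustrative): the shallow memcpy() above leaves top_pipe and
 * bottom_pipe pointing into src_ctx's pipe array, so the first loop
 * re-anchors them into new_ctx by pipe_idx. The retain loop then takes
 * stream/plane references because the copy owns its own, dropped later
 * through dc_release_state().
 */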
void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);
@ -1824,7 +1893,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
context = dc_create_state();
context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
@ -2109,13 +2178,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}


@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.clk.dispclk_khz,
context->bw.dcn.clk.dppclk_khz,
context->bw.dcn.clk.dcfclk_khz,
context->bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw.dcn.clk.fclk_khz,
context->bw.dcn.clk.socclk_khz);
context->bw_ctx.bw.dcn.clk.dispclk_khz,
context->bw_ctx.bw.dcn.clk.dppclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw_ctx.bw.dcn.clk.fclk_khz,
context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
context->bw.dcn.clk.dispclk_khz,
context->bw.dcn.clk.dppclk_khz,
context->bw.dcn.clk.dcfclk_khz,
context->bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw.dcn.clk.fclk_khz,
context->bw.dcn.clk.socclk_khz);
context->bw_ctx.bw.dcn.clk.dispclk_khz,
context->bw_ctx.bw.dcn.clk.dppclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_khz,
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
context->bw_ctx.bw.dcn.clk.fclk_khz,
context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}


@ -58,7 +58,6 @@
******************************************************************************/
enum {
LINK_RATE_REF_FREQ_IN_MHZ = 27,
PEAK_FACTOR_X1000 = 1006,
/*
* Some receivers fail to train on first try and are good
@ -1397,9 +1396,18 @@ static enum dc_status enable_link_dp(
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
/* If the link settings differ from the current ones and the link is
 * already enabled, it must be disabled before programming the new rate.
 */
if (link->link_status.link_active &&
(link->cur_link_settings.lane_count != link_settings.lane_count ||
link->cur_link_settings.link_rate != link_settings.link_rate)) {
dp_disable_link_phy(link, pipe_ctx->stream->signal);
}
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
state->dccg->funcs->update_clocks(state->dccg, state, false);
state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
link,
@ -2150,7 +2158,7 @@ static bool dp_active_dongle_validate_timing(
return false;
}
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
return true;
@ -2289,14 +2297,13 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
struct dc_link_settings *link_settings =
&stream->link->cur_link_settings;
uint32_t link_rate_in_mbps =
link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
struct fixed31_32 mbps = dc_fixpt_from_int(
link_rate_in_mbps * link_settings->lane_count);
struct fixed31_32 mbytes_per_sec;
uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
return dc_fixpt_div_int(mbps, 54);
mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
return dc_fixpt_div_int(mbytes_per_sec, 54);
}
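/* Worked example (illustrative): an MST time slot is 1/64 of the link, and
 * a PBN unit is defined as 54/64 MBytes/sec, hence the divide by 54.
 * Continuing the HBR2 x4 numbers from dc_link_bandwidth_kbps():
 *
 *   17280000 kbps / 8000 = 2160 MBytes/sec
 *   2160 / 54            = 40 PBN per time slot
 */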
static int get_color_depth(enum dc_color_depth color_depth)
@ -2321,7 +2328,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
uint32_t denominator;
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
@ -2736,3 +2743,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
}
}
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
case COLOR_DEPTH_888:
bits_per_channel = 8;
break;
case COLOR_DEPTH_101010:
bits_per_channel = 10;
break;
case COLOR_DEPTH_121212:
bits_per_channel = 12;
break;
case COLOR_DEPTH_141414:
bits_per_channel = 14;
break;
case COLOR_DEPTH_161616:
bits_per_channel = 16;
break;
default:
break;
}
ASSERT(bits_per_channel != 0);
kbps = timing->pix_clk_100hz / 10;
kbps *= bits_per_channel;
if (timing->flags.Y_ONLY != 1) {
/* Only Y-Only reduces bandwidth to 1/3 of RGB; all other encodings carry three channels */
kbps *= 3;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
kbps /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
kbps = kbps * 2 / 3;
}
return kbps;
}
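/* Worked example (illustrative): for a 4k60 RGB 8bpc timing,
 * pix_clk_100hz = 5940000 (594 MHz):
 *
 *   5940000 / 10 = 594000    kHz
 *   * 8 bpc      = 4752000
 *   * 3 channels = 14256000  kbps (~14.26 Gbps)
 *
 * which fits within the 17280000 kbps HBR2 x4 budget above, so
 * dp_validate_mode_timing() would accept the mode.
 */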

View File

@ -1533,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
static uint32_t bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing)
{
uint32_t bits_per_channel = 0;
uint32_t kbps;
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
bits_per_channel = 6;
break;
case COLOR_DEPTH_888:
bits_per_channel = 8;
break;
case COLOR_DEPTH_101010:
bits_per_channel = 10;
break;
case COLOR_DEPTH_121212:
bits_per_channel = 12;
break;
case COLOR_DEPTH_141414:
bits_per_channel = 14;
break;
case COLOR_DEPTH_161616:
bits_per_channel = 16;
break;
default:
break;
}
ASSERT(bits_per_channel != 0);
kbps = timing->pix_clk_100hz / 10;
kbps *= bits_per_channel;
if (timing->flags.Y_ONLY != 1) {
/* Only Y-Only reduces bandwidth to 1/3 of RGB; all other encodings carry three channels */
kbps *= 3;
if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
kbps /= 2;
else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
kbps = kbps * 2 / 3;
}
return kbps;
}
static uint32_t bandwidth_in_kbps_from_link_settings(
const struct dc_link_settings *link_setting)
{
uint32_t link_rate_in_kbps = link_setting->link_rate *
LINK_RATE_REF_FREQ_IN_KHZ;
uint32_t lane_count = link_setting->lane_count;
uint32_t kbps = link_rate_in_kbps;
kbps *= lane_count;
kbps *= 8; /* 8 bits per byte*/
return kbps;
}
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@ -1612,7 +1549,7 @@ bool dp_validate_mode_timing(
return true;
/* We always use verified link settings */
link_setting = &link->verified_link_cap;
link_setting = dc_link_get_verified_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@ -1620,8 +1557,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
req_bw = bandwidth_in_kbps_from_timing(timing);
max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
req_bw = dc_bandwidth_in_kbps_from_timing(timing);
max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@ -1656,7 +1593,8 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
link_bw = bandwidth_in_kbps_from_link_settings(
link_bw = dc_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
@ -1707,7 +1645,8 @@ static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settin
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
link_bw = bandwidth_in_kbps_from_link_settings(
link_bw = dc_link_bandwidth_kbps(
link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
@ -1739,7 +1678,7 @@ void decide_link_settings(struct dc_stream_state *stream,
struct dc_link *link;
uint32_t req_bw;
req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
link = stream->link;
@ -2304,8 +2243,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
det_caps[1] * 25000;
link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
det_caps[1] * 2500;
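/* Illustrative: DPCD reports the downstream TMDS clock limit in units of
 * 2.5 MHz, so e.g. det_caps[1] = 120 -> 120 * 2500 = 300000 kHz (300 MHz,
 * enough for 4k30 HDMI through the dongle).
 */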
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@ -2322,7 +2261,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}


@ -1305,18 +1305,13 @@ struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
{
struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
bool result = false;
if (!top_pipe)
return false;
if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
return false;
while (top_pipe) {
if (!top_pipe->top_pipe && top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
result = true;
top_pipe = top_pipe->top_pipe;
}
return result;
return true;
}
bool dc_remove_plane_from_context(
@ -2064,7 +2059,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
dst_ctx->dccg = dc->res_pool->clk_mgr;
dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
}
/**


@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
#define DC_VER "3.2.24"
#define DC_VER "3.2.25"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@ -669,7 +669,8 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_commit_state(struct dc *dc, struct dc_state *context);
struct dc_state *dc_create_state(void);
struct dc_state *dc_create_state(struct dc *dc);
struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);


@ -246,10 +246,18 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting);
const struct dc_link_settings *dc_link_get_verified_link_cap(
const struct dc_link *link);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */


@ -395,7 +395,7 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr422_converter;
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
};
/* Scaling format */
enum scaling_transformation {


@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
if (context->bw.dce.dispclk_khz >
if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
< context->bw.dce.dispclk_khz)
< context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements(
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
context->bw.dce.all_displays_in_sync;
context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
context->bw.dce.nbp_state_change_enable == false;
context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
context->bw.dce.cpuc_state_change_enable == false;
context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
context->bw.dce.cpup_state_change_enable == false;
context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
context->bw.dce.blackout_recovery_time_us;
context->bw_ctx.bw.dce.blackout_recovery_time_us;
pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
context->bw.dce.sclk_khz);
context->bw_ctx.bw.dce.sclk_khz);
/*
* As a workaround for >4x4K light-up, set dcfclock to the min_engine_clock value.
@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
= context->bw.dce.sclk_deep_sleep_khz;
= context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw.dce.dispclk_khz;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw.dce.dispclk_khz;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
int patched_disp_clk = context->bw.dce.dispclk_khz;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
int patched_disp_clk = context->bw.dce.dispclk_khz;
int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)


@ -773,11 +773,11 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
context->bw.dce.dispclk_khz = 0;
context->bw.dce.yclk_khz = 0;
context->bw_ctx.bw.dce.dispclk_khz = 0;
context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;


@ -1166,8 +1166,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
state->dccg->funcs->get_dp_ref_clk_frequency(
state->dccg);
state->clk_mgr->funcs->get_dp_ref_clk_frequency(
state->clk_mgr);
}
audio_output->pll_info.feed_back_divider =
@ -1630,18 +1630,18 @@ static void dce110_set_displaymarks(
dc->bw_vbios->blackout_duration, pipe_ctx->stream);
pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
pipe_ctx->plane_res.mi,
context->bw.dce.nbp_state_change_wm_ns[num_pipes],
context->bw.dce.stutter_exit_wm_ns[num_pipes],
context->bw.dce.stutter_entry_wm_ns[num_pipes],
context->bw.dce.urgent_wm_ns[num_pipes],
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes],
context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
num_pipes++;
pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
pipe_ctx->plane_res.mi,
context->bw.dce.nbp_state_change_wm_ns[num_pipes],
context->bw.dce.stutter_exit_wm_ns[num_pipes],
context->bw.dce.urgent_wm_ns[num_pipes],
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
}
num_pipes++;


@ -883,7 +883,7 @@ static bool dce110_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
&context->bw.dce))
&context->bw_ctx.bw.dce))
result = true;
if (!result)
@ -893,8 +893,8 @@ static bool dce110_validate_bandwidth(
context->streams[0]->timing.v_addressable,
context->streams[0]->timing.pix_clk_100hz / 10);
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
if (memcmp(&dc->current_state->bw_ctx.bw.dce,
&context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@ -908,34 +908,34 @@ static bool dce110_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
context->bw.dce.all_displays_in_sync,
context->bw.dce.dispclk_khz,
context->bw.dce.sclk_khz,
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.stutter_mode_enable,
context->bw_ctx.bw.dce.cpuc_state_change_enable,
context->bw_ctx.bw.dce.cpup_state_change_enable,
context->bw_ctx.bw.dce.nbp_state_change_enable,
context->bw_ctx.bw.dce.all_displays_in_sync,
context->bw_ctx.bw.dce.dispclk_khz,
context->bw_ctx.bw.dce.sclk_khz,
context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}


@ -823,7 +823,7 @@ bool dce112_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
&context->bw.dce))
&context->bw_ctx.bw.dce))
result = true;
if (!result)
@ -831,8 +831,8 @@ bool dce112_validate_bandwidth(
"%s: Bandwidth validation failed!",
__func__);
if (memcmp(&dc->current_state->bw.dce,
&context->bw.dce, sizeof(context->bw.dce))) {
if (memcmp(&dc->current_state->bw_ctx.bw.dce,
&context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@ -846,34 +846,34 @@ bool dce112_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw.dce.urgent_wm_ns[0].b_mark,
context->bw.dce.urgent_wm_ns[0].a_mark,
context->bw.dce.stutter_exit_wm_ns[0].b_mark,
context->bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw.dce.urgent_wm_ns[1].b_mark,
context->bw.dce.urgent_wm_ns[1].a_mark,
context->bw.dce.stutter_exit_wm_ns[1].b_mark,
context->bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw.dce.urgent_wm_ns[2].b_mark,
context->bw.dce.urgent_wm_ns[2].a_mark,
context->bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw.dce.stutter_exit_wm_ns[2].a_mark,
context->bw.dce.stutter_mode_enable,
context->bw.dce.cpuc_state_change_enable,
context->bw.dce.cpup_state_change_enable,
context->bw.dce.nbp_state_change_enable,
context->bw.dce.all_displays_in_sync,
context->bw.dce.dispclk_khz,
context->bw.dce.sclk_khz,
context->bw.dce.sclk_deep_sleep_khz,
context->bw.dce.yclk_khz,
context->bw.dce.blackout_recovery_time_us);
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
context->bw_ctx.bw.dce.stutter_mode_enable,
context->bw_ctx.bw.dce.cpuc_state_change_enable,
context->bw_ctx.bw.dce.cpup_state_change_enable,
context->bw_ctx.bw.dce.nbp_state_change_enable,
context->bw_ctx.bw.dce.all_displays_in_sync,
context->bw_ctx.bw.dce.dispclk_khz,
context->bw_ctx.bw.dce.sclk_khz,
context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
context->bw_ctx.bw.dce.yclk_khz,
context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}


@ -807,11 +807,11 @@ bool dce80_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
context->bw.dce.dispclk_khz = 681000;
context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
context->bw_ctx.bw.dce.dispclk_khz = 681000;
context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
context->bw.dce.dispclk_khz = 0;
context->bw.dce.yclk_khz = 0;
context->bw_ctx.bw.dce.dispclk_khz = 0;
context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;


@ -150,7 +150,7 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
struct dc_clocks *new_clocks = &context->bw.dcn.clk;
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct pp_smu_display_requirement_rv *smu_req_cur =
&dc->res_pool->pp_smu_req;
struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;


@ -385,6 +385,10 @@ void dpp1_cnv_setup (
default:
break;
}
/* Set default color space based on format if none is given. */
color_space = input_color_space ? input_color_space : color_space;
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@ -396,7 +400,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
tbl_entry.color_space = input_color_space;
tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;


@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc,
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
@ -979,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
if (pipe_ctx->stream != NULL)
if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->lock(tg);
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
@ -1854,7 +1852,7 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
}
@ -1949,7 +1947,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
COLOR_SPACE_YCBCR601_LIMITED);
plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
@ -2069,7 +2067,7 @@ void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
@ -2138,6 +2136,9 @@ void update_dchubp_dpp(
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
if (dc->hwss.set_cursor_sdr_white_level)
dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
@ -2328,6 +2329,7 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@ -2337,7 +2339,13 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg;
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
interdependent_update = top_pipe_to_program->plane_state &&
top_pipe_to_program->plane_state->update_flags.bits.full_update;
if (interdependent_update)
lock_all_pipes(dc, context, true);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
/* OTG blank before remove all front end */
@ -2357,15 +2365,9 @@ static void dcn10_apply_ctx_for_surface(
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
old_pipe_ctx->plane_res.hubp &&
old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx);
/*
* power down fe will unlock when calling reset, need
* to lock it back here. Messy, need rework.
*/
pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
}
}
if ((!pipe_ctx->plane_state ||
@ -2384,29 +2386,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
if (top_pipe_to_program->plane_state &&
top_pipe_to_program->plane_state->update_flags.bits.full_update)
if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
if (!pipe_ctx->stream || pipe_ctx->stream == stream
|| !pipe_ctx->plane_state
|| !tg->funcs->is_tg_enabled(tg))
if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
!pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
tg->funcs->lock(tg);
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
tg->funcs->unlock(tg);
}
if (interdependent_update)
lock_all_pipes(dc, context, false);
else
dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@ -2443,7 +2441,7 @@ static void dcn10_prepare_bandwidth(
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
context->bw.dcn.clk.phyclk_khz = 0;
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@ -2452,7 +2450,7 @@ static void dcn10_prepare_bandwidth(
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
&context->bw.dcn.watermarks,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@ -2473,7 +2471,7 @@ static void dcn10_optimize_bandwidth(
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
context->bw.dcn.clk.phyclk_khz = 0;
context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@ -2482,7 +2480,7 @@ static void dcn10_optimize_bandwidth(
}
hubbub1_program_watermarks(dc->res_pool->hubbub,
&context->bw.dcn.watermarks,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@ -2710,9 +2708,15 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
uint32_t x_offset = min(x_plane, pos_cpy.x);
uint32_t y_offset = min(y_plane, pos_cpy.y);
pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
pos_cpy.x -= x_offset;
pos_cpy.y -= y_offset;
pos_cpy.x_hotspot += (x_plane - x_offset);
pos_cpy.y_hotspot += (y_plane - y_offset);
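/* Worked example (illustrative): for a plane at dst_rect.x = 200 and a
 * cursor at pos_cpy.x = 150:
 *
 *   x_offset           = min(200, 150) = 150
 *   pos_cpy.x         -> 150 - 150     = 0
 *   pos_cpy.x_hotspot += (200 - 150)   = +50
 *
 * i.e. the programmed position is clamped into the pipe while the hotspot
 * shift keeps the visible cursor where the caller asked for it.
 */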
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@ -2807,6 +2811,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
void lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
struct pipe_ctx *pipe_ctx;
struct timing_generator *tg;
int i;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
*/
if (pipe_ctx->top_pipe ||
!pipe_ctx->stream || !pipe_ctx->plane_state ||
!tg->funcs->is_tg_enabled(tg))
continue;
if (lock)
tg->funcs->lock(tg);
else
tg->funcs->unlock(tg);
}
}
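/* Usage sketch (mirrors dcn10_apply_ctx_for_surface() above): full updates
 * now bracket interdependent register programming with this helper instead
 * of toggling each pipe's lock individually:
 *
 *   if (interdependent_update)
 *           lock_all_pipes(dc, context, true);
 *   ...program hubp_setup_interdependent() on the affected pipes...
 *   if (interdependent_update)
 *           lock_all_pipes(dc, context, false);
 */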
static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,


@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
void lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock);
#endif /* __DC_HWSS_DCN10_H__ */


@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
dc->current_state->bw.dcn.clk.dcfclk_khz,
dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw.dcn.clk.dispclk_khz,
dc->current_state->bw.dcn.clk.dppclk_khz,
dc->current_state->bw.dcn.clk.fclk_khz,
dc->current_state->bw.dcn.clk.socclk_khz);
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;


@ -1146,7 +1146,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
continue;
if (context->stream_status[i].plane_count > 2)
return false;
return DC_FAIL_UNSUPPORTED_1;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =


@ -43,7 +43,7 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
DC_FAIL_UNSUPPORTED_1 = 18,
DC_ERROR_UNEXPECTED = -1
};


@ -266,18 +266,22 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
};
union bw_context {
union bw_output {
struct dcn_bw_output dcn;
struct dce_bw_output dce;
};
struct bw_context {
union bw_output bw;
struct display_mode_lib dml;
};
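/* Illustrative: callers now go through the extra level, e.g.
 *
 *   context->bw.dcn.clk.dispclk_khz          (old)
 *   context->bw_ctx.bw.dcn.clk.dispclk_khz   (new)
 *
 * with context->bw_ctx.dml carrying the per-state copy of DC's DML.
 */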
/**
* struct dc_state - The full description of a state requested by a user
*
* @streams: Stream properties
* @stream_status: The planes on a given stream
* @res_ctx: Persistent state of resources
* @bw: The output from bandwidth and watermark calculations
* @bw_ctx: The output from bandwidth and watermark calculations and the DML
* @pp_display_cfg: PowerPlay clocks and settings
* @dcn_bw_vars: non-stack memory to support bandwidth calculations
*
@ -289,7 +293,7 @@ struct dc_state {
struct resource_context res_ctx;
union bw_context bw;
struct bw_context bw_ctx;
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
@ -297,7 +301,7 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
struct clk_mgr *dccg;
struct clk_mgr *clk_mgr;
struct {
bool full_update_needed : 1;


@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.funcs = &vblank_irq_info_funcs\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\


@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\


@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.funcs = &vblank_irq_info_funcs\
.funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\


@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE1;
case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE2;
case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE3;
case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE4;
case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE5;
case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.set = NULL,
.ack = NULL
};
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
#define vupdate_int_entry(reg_num)\
/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
 * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
 */
#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
.funcs = &vblank_irq_info_funcs\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
.funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
vupdate_int_entry(0),
vupdate_int_entry(1),
vupdate_int_entry(2),
vupdate_int_entry(3),
vupdate_int_entry(4),
vupdate_int_entry(5),
vupdate_no_lock_int_entry(0),
vupdate_no_lock_int_entry(1),
vupdate_no_lock_int_entry(2),
vupdate_no_lock_int_entry(3),
vupdate_no_lock_int_entry(4),
vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),


@ -43,10 +43,10 @@ static const unsigned char max_reduction_table[13] = {
/* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
* 100 100 100 100 100 100 100 90.2 85.1 80.0 80.0 75.3 75.3 %
* 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 %
*/
static const unsigned char min_reduction_table_v_2_2[13] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe6, 0xd9, 0xcc, 0xcc, 0xc0, 0xc0};
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0};
/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12


@ -97,16 +97,19 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
return ret;
}
int smu_update_table(struct smu_context *smu, uint32_t table_id,
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
void *table_data, bool drv2smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = NULL;
int ret = 0;
uint32_t table_index;
if (!table_data || table_id >= smu_table->table_count)
return -EINVAL;
table_index = (exarg << 16) | table_id;
table = &smu_table->tables[table_id];
if (drv2smu)
@ -123,7 +126,7 @@ int smu_update_table(struct smu_context *smu, uint32_t table_id,
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
table_id);
table_index);
if (ret)
return ret;
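/* Illustrative: the SMU message argument now packs an optional 16-bit
 * extra argument (e.g. a workload type) into the high half-word, with the
 * table id in the low half-word:
 *
 *   table_index = (WORKLOAD_PPLIB_CUSTOM_BIT << 16) | TABLE_ACTIVITY_MONITOR_COEFF;
 *
 * Existing callers are unchanged because the smu_update_table() wrapper
 * macro added in this patch passes exarg = 0.
 */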


@ -77,7 +77,7 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
static const struct profile_mode_setting smu7_profiling[7] =
static struct profile_mode_setting smu7_profiling[7] =
{{0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
@ -4984,17 +4984,27 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
if (size < 8)
if (size < 8 && size != 0)
return -EINVAL;
tmp.bupdate_sclk = input[0];
tmp.sclk_up_hyst = input[1];
tmp.sclk_down_hyst = input[2];
tmp.sclk_activity = input[3];
tmp.bupdate_mclk = input[4];
tmp.mclk_up_hyst = input[5];
tmp.mclk_down_hyst = input[6];
tmp.mclk_activity = input[7];
/* If only CUSTOM is passed in, use the saved values. Check
* that we actually have a CUSTOM profile by ensuring that
* the "use sclk" or the "use mclk" bits are set
*/
tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
if (size == 0) {
if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
return -EINVAL;
} else {
tmp.bupdate_sclk = input[0];
tmp.sclk_up_hyst = input[1];
tmp.sclk_down_hyst = input[2];
tmp.sclk_activity = input[3];
tmp.bupdate_mclk = input[4];
tmp.mclk_up_hyst = input[5];
tmp.mclk_down_hyst = input[6];
tmp.mclk_activity = input[7];
smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
}
if (!smum_update_dpm_settings(hwmgr, &tmp)) {
memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
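/* Usage sketch (illustrative, hypothetical parameter values): through the
 * pp_power_profile_mode sysfs file, a full write stores the CUSTOM profile
 * and a bare mode index re-applies it:
 *
 *   echo "6 1 10 60 25 0 0 0 0" > pp_power_profile_mode   # store CUSTOM
 *   echo "6" > pp_power_profile_mode                      # size == 0, reuse
 *
 * The bare write only succeeds if bupdate_sclk or bupdate_mclk was set by
 * an earlier full write.
 */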


@ -1427,6 +1427,15 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
vega10_setup_default_pcie_table(hwmgr);
/* Zero out the saved copy of the CUSTOM profile
* This will be checked when trying to set the profile
* and will require that new values be passed in
*/
data->custom_profile_mode[0] = 0;
data->custom_profile_mode[1] = 0;
data->custom_profile_mode[2] = 0;
data->custom_profile_mode[3] = 0;
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
@ -4906,13 +4915,21 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
uint8_t min_active_level;
uint32_t power_profile_mode = input[size];
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << power_profile_mode);
if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size == 0 || size > 4)
if (size != 0 && size != 4)
return -EINVAL;
/* If size = 0 and the CUSTOM profile has been set already
* then just apply the profile. The copy stored in the hwmgr
* is zeroed out on init
*/
if (size == 0) {
if (data->custom_profile_mode[0] != 0)
goto out;
else
return -EINVAL;
}
data->custom_profile_mode[0] = busy_set_point = input[0];
data->custom_profile_mode[1] = FPS = input[1];
data->custom_profile_mode[2] = use_rlc_busy = input[2];
@ -4923,6 +4940,9 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
use_rlc_busy << 16 | min_active_level<<24);
}
out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << power_profile_mode);
hwmgr->power_profile_mode = power_profile_mode;
return 0;


@ -427,6 +427,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
data->is_custom_profile_set = false;
return 0;
}
@ -3827,7 +3828,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
}
if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (size < 10)
struct vega20_hwmgr *data =
(struct vega20_hwmgr *)(hwmgr->backend);
if (size == 0 && !data->is_custom_profile_set)
return -EINVAL;
if (size < 10 && size != 0)
return -EINVAL;
result = vega20_get_activity_monitor_coeff(hwmgr,
@ -3837,6 +3842,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
"[SetPowerProfile] Failed to get activity monitor!",
return result);
/* If size==0, then we want to apply the already-configured
* CUSTOM profile again. Just apply it, since we checked its
* validity above
*/
if (size == 0)
goto out;
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
@ -3887,11 +3899,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
result = vega20_set_activity_monitor_coeff(hwmgr,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
data->is_custom_profile_set = true;
PP_ASSERT_WITH_CODE(!result,
"[SetPowerProfile] Failed to set activity monitor!",
return result);
}
out:
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
conv_power_profile_to_pplib_workload(power_profile_mode);


@ -530,6 +530,8 @@ struct vega20_hwmgr {
bool pcie_parameters_override;
uint32_t pcie_gen_level1;
uint32_t pcie_width_level1;
bool is_custom_profile_set;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10


@ -524,12 +524,6 @@ struct smu_funcs
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*set_od8_default_settings)(struct smu_context *smu,
bool initialize);
int (*get_activity_monitor_coeff)(struct smu_context *smu,
uint8_t *table,
uint16_t workload_type);
int (*set_activity_monitor_coeff)(struct smu_context *smu,
uint8_t *table,
uint16_t workload_type);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
@ -745,8 +739,11 @@ extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool
extern int smu_feature_is_supported(struct smu_context *smu, int feature_id);
extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable);
int smu_update_table(struct smu_context *smu, uint32_t table_id,
int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
void *table_data, bool drv2smu);
#define smu_update_table(smu, table_id, table_data, drv2smu) \
smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu))
bool is_support_sw_smu(struct amdgpu_device *adev);
int smu_reset(struct smu_context *smu);
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,


@ -1460,62 +1460,6 @@ static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
return 0;
}
static int smu_v11_0_set_activity_monitor_coeff(struct smu_context *smu,
uint8_t *table, uint16_t workload_type)
{
int ret = 0;
memcpy(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].cpu_addr,
table, smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].size);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
if (ret) {
pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
if (ret) {
pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram,
TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
if (ret) {
pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__);
return ret;
}
return ret;
}
static int smu_v11_0_get_activity_monitor_coeff(struct smu_context *smu,
uint8_t *table, uint16_t workload_type)
{
int ret = 0;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
upper_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
if (ret) {
pr_err("[%s] Attempt to Set Dram Addr High Failed!", __func__);
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
lower_32_bits(smu->smu_table.tables[TABLE_ACTIVITY_MONITOR_COEFF].mc_address));
if (ret) {
pr_err("[%s] Attempt to Set Dram Addr Low Failed!", __func__);
return ret;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_TransferTableSmu2Dram,
TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16));
if (ret) {
pr_err("[%s] Attempt to Transfer Table From SMU Failed!", __func__);
return ret;
}
return ret;
}
static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
{
int pplib_workload = 0;
@ -1584,9 +1528,8 @@ static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
result = smu_v11_0_get_activity_monitor_coeff(smu,
(uint8_t *)(&activity_monitor),
workload_type);
result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
workload_type, &activity_monitor, false);
if (result) {
pr_err("[%s] Failed to get activity monitor!", __func__);
return result;
@ -1658,7 +1601,7 @@ static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, ret = 0;
int workload_type = 0, ret = 0;
smu->power_profile_mode = input[size];
@ -1668,9 +1611,8 @@ static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
ret = smu_v11_0_get_activity_monitor_coeff(smu,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
if (ret) {
pr_err("[%s] Failed to get activity monitor!", __func__);
return ret;
@ -1723,9 +1665,8 @@ static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input
break;
}
ret = smu_v11_0_set_activity_monitor_coeff(smu,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, true);
if (ret) {
pr_err("[%s] Failed to set activity monitor!", __func__);
return ret;
@ -1994,8 +1935,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
.get_sclk = smu_v11_0_dpm_get_sclk,
.get_mclk = smu_v11_0_dpm_get_mclk,
.set_od8_default_settings = smu_v11_0_set_od8_default_settings,
.get_activity_monitor_coeff = smu_v11_0_get_activity_monitor_coeff,
.set_activity_monitor_coeff = smu_v11_0_set_activity_monitor_coeff,
.conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
.get_power_profile_mode = smu_v11_0_get_power_profile_mode,
.set_power_profile_mode = smu_v11_0_set_power_profile_mode,