From d65848657c3da5c0d4b685f823d0230f151ab34e Mon Sep 17 00:00:00 2001 From: Kent Russell Date: Tue, 23 Jul 2019 10:18:01 -0400 Subject: [PATCH 01/18] drm/amdkfd: Fix byte align on VegaM This was missed during the addition of VegaM support Reviewed-by: Alex Deucher Signed-off-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d3ee9c42f7e..6a5c96e519b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( adev->asic_type != CHIP_FIJI && adev->asic_type != CHIP_POLARIS10 && adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12) ? + adev->asic_type != CHIP_POLARIS12 && + adev->asic_type != CHIP_VEGAM) ? VI_BO_SIZE_ALIGN : 1; mapping_flags = AMDGPU_VM_PAGE_READABLE; From 2c0f07fe3ca57c8fb4ee179c9fb50d6eba75349e Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Mon, 3 Jun 2019 15:58:31 +0800 Subject: [PATCH 02/18] drm/amd/powerplay: add callback function of get_thermal_temperature_range 1. the thermal temperature is ASIC-related data, so move the code logic to xxx_ppt.c. 2. replace the data structure PP_TemperatureRange with smu_temperature_range. 3. change the temperature unit from temp*1000 to temp (the plain temperature unit). Signed-off-by: Kevin Wang Signed-off-by: Kenneth Feng Acked-by: Huang Rui Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 17 ++++++++++ drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 ++++++---- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 34 ++++++------------- 4 files changed, 40 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 22e46a289a16..208e6711d506 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -429,7 +429,6 @@ struct smu_table_context struct smu_table *tables; uint32_t table_count; struct smu_table memory_pool; - uint16_t software_shutdown_temp; uint8_t thermal_controller_type; uint16_t TDPODLimit; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 4aaad255a288..3f68268a8733 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -1620,6 +1620,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo return ret; } +static int navi10_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table; + + if (!range || !powerplay_table) + return -EINVAL; + + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->software_shutdown_temp; + + return 0; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1657,6 +1673,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_ppfeature_status = navi10_get_ppfeature_status, .set_ppfeature_status = navi10_set_ppfeature_status, .set_performance_level = navi10_set_performance_level, +
.get_thermal_temperature_range = navi10_get_thermal_temperature_range, }; void navi10_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index caca9091bfcc..1ecb409e3bed 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, struct smu_temperature_range *range) { struct amdgpu_device *adev = smu->adev; - int low = SMU_THERMAL_MINIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + int low = SMU_THERMAL_MINIMUM_ALERT_TEMP; + int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP; uint32_t val; if (!range) @@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, if (high > range->max) high = range->max; + low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min); + high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max); + if (low > high) return -EINVAL; @@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu, val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0); val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); @@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) if (!smu->pm_enabled) return ret; + ret = smu_get_thermal_temperature_range(smu, &range); + if (ret) + return ret; if (smu->smu_table.thermal_controller_type) { ret = smu_v11_0_set_thermal_range(smu, &range); @@ -1211,6 +1215,8 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) adev->pm.dpm.thermal.min_mem_temp = range.mem_min; adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index dc139a6feeb1..dd6fd1c8bf24 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable, sizeof(PPTable_t)); - table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); @@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu, return 0; } -static const struct smu_temperature_range vega20_thermal_policy[] = -{ - {-273150, 99000, 99000, -273150, 
99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - static int vega20_get_thermal_temperature_range(struct smu_context *smu, struct smu_temperature_range *range) { - + struct smu_table_context *table_context = &smu->smu_table; + ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table; PPTable_t *pptable = smu->smu_table.driver_pptable; - if (!range) + if (!range || !powerplay_table) return -EINVAL; - memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range)); - - range->max = pptable->TedgeLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_crit_max = pptable->ThotspotLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_crit_max = pptable->ThbmLimit * - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; - range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)* - SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + /* The unit is temperature */ + range->min = 0; + range->max = powerplay_table->usSoftwareShutdownTemp; + range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE); + range->hotspot_crit_max = pptable->ThotspotLimit; + range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT); + range->mem_crit_max = pptable->ThbmLimit; + range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM); return 0; From 45a660143bf90a35ab64df663b88d82c02a17091 Mon Sep 17 00:00:00 2001 From: Kevin Wang Date: Tue, 23 Jul 2019 19:56:52 +0800 Subject: [PATCH 03/18] drm/amd/powerplay: fix temperature granularity error in smu11 In the earlier patch "drm/amd/powerplay: add callback function of get_thermal_temperature_range", the driver missed the temperature granularity change on the other temperature fields.
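For reference, a minimal sketch (not driver code) of the scaling this fix restores, assuming SMU_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000, i.e. the powerplay tables now hold plain degrees Celsius while the adev->pm.dpm.thermal fields are consumed in millidegrees:

    /* Illustrative only: every field copied out of smu_temperature_range,
     * not just min/max, has to be scaled from degrees C to millidegrees.
     * smu_temp_to_millidegrees() is a hypothetical helper for illustration.
     */
    #define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000

    static inline int smu_temp_to_millidegrees(int temp_c)
    {
            return temp_c * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
    }

    /* e.g. a 99 C hotspot critical limit is reported as 99000 */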
Signed-off-by: Kevin Wang Reviewed-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index 1ecb409e3bed..ac5b26228e75 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1206,15 +1206,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } - adev->pm.dpm.thermal.min_temp = range.min; - adev->pm.dpm.thermal.max_temp = range.max; - adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max; - adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min; - adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max; - adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max; - adev->pm.dpm.thermal.min_mem_temp = range.mem_min; - adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max; - adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max; + adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; From 090efd946d00cd23ce4ac25bce125f408b704d7d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 25 Jul 2019 22:28:58 -0500 Subject: [PATCH 04/18] drm/amdgpu/powerplay: use proper revision id for navi The PCI revision id determines the sku. 
Reviewed-by: Feifei Xu Reviewed-by: Kevin Wang Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 3f68268a8733..be592d22bdcc 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include +#include #include "amdgpu.h" #include "amdgpu_smu.h" #include "atomfirmware.h" @@ -1573,7 +1574,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t uclk_level = 0; - switch (adev->rev_id) { + switch (adev->pdev->revision) { case 0xf0: /* XTX */ case 0xc0: sclk_freq = NAVI10_PEAK_SCLK_XTX; From 479156f2e5540077377a823eaf5a4263bd329063 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 12:10:34 +0800 Subject: [PATCH 05/18] drm/amd/powerplay: fix null pointer dereference around dpm state relates DPM state related operations are not supported on the new SW SMU ASICs, but it is still not OK to trigger a null pointer dereference when accessing them. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 18 +++++++++++++----- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 3 ++- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03ca8c69114f..8c90baca07b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type pm; - if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); - else if (adev->powerplay.pp_funcs->get_current_power_state) + if (is_support_sw_smu(adev)) { + if (adev->smu.ppt_funcs->get_current_power_state) + pm = amdgpu_smu_get_current_power_state(adev); + else + pm = adev->pm.dpm.user_state; + } else if (adev->powerplay.pp_funcs->get_current_power_state) { pm = amdgpu_dpm_get_current_power_state(adev); - else + } else { pm = adev->pm.dpm.user_state; + } return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ?
"battery" : @@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, goto fail; } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); + } else if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); } else { mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index c097113c3976..88ed85e3d233 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu, /* not support power state */ memset(state_info, 0, sizeof(struct pp_states_info)); - state_info->nums = 0; + state_info->nums = 1; + state_info->states[0] = POWER_STATE_TYPE_DEFAULT; return 0; } From f0bc1ee473fefd4d9f2ace9fad1cefdc0b7f6fdd Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Jul 2019 10:12:42 +0800 Subject: [PATCH 06/18] drm/amd/powerplay: enable SW SMU reset functionality Move SMU irq handler register to sw_init as that's totally software related. Otherwise, it will prevent SMU reset working. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 88ed85e3d233..93cd969e5cf5 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -724,6 +724,12 @@ static int smu_sw_init(void *handle) return ret; } + ret = smu_register_irq_handler(smu); + if (ret) { + pr_err("Failed to register smc irq handler!\n"); + return ret; + } + return 0; } @@ -733,6 +739,9 @@ static int smu_sw_fini(void *handle) struct smu_context *smu = &adev->smu; int ret; + kfree(smu->irq_source); + smu->irq_source = NULL; + ret = smu_smc_table_sw_fini(smu); if (ret) { pr_err("Failed to sw fini smc table!\n"); @@ -1089,10 +1098,6 @@ static int smu_hw_init(void *handle) if (ret) goto failed; - ret = smu_register_irq_handler(smu); - if (ret) - goto failed; - if (!smu->pm_enabled) adev->pm.dpm_enabled = false; else @@ -1122,9 +1127,6 @@ static int smu_hw_fini(void *handle) kfree(table_context->overdrive_table); table_context->overdrive_table = NULL; - kfree(smu->irq_source); - smu->irq_source = NULL; - ret = smu_fini_fb_allocations(smu); if (ret) return ret; From 67d0859e2758ef992fd32499747ce4b1038a63c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 30 Jul 2019 11:17:03 +0200 Subject: [PATCH 07/18] drm/amdgpu: fix error handling in amdgpu_cs_process_fence_dep MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We always need to drop the ctx reference and should check for errors first and then dereference the fence pointer. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index e069de8b54e6..4e4094f842e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p, return r; } - fence = amdgpu_ctx_get_fence(ctx, entity, - deps[i].handle); + fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle); + amdgpu_ctx_put(ctx); + + if (IS_ERR(fence)) + return PTR_ERR(fence); + else if (!fence) + continue; if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) { - struct drm_sched_fence *s_fence = to_drm_sched_fence(fence); + struct drm_sched_fence *s_fence; struct dma_fence *old = fence; + s_fence = to_drm_sched_fence(fence); fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(old); } - if (IS_ERR(fence)) { - r = PTR_ERR(fence); - amdgpu_ctx_put(ctx); + r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true); + dma_fence_put(fence); + if (r) return r; - } else if (fence) { - r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, - true); - dma_fence_put(fence); - amdgpu_ctx_put(ctx); - if (r) - return r; - } } return 0; } From 929e571c04c285861e0bb049a396a2bdaea63282 Mon Sep 17 00:00:00 2001 From: Wang Xiayang Date: Sat, 27 Jul 2019 17:30:30 +0800 Subject: [PATCH 08/18] drm/amdgpu: fix a potential information leaking bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Coccinelle reports a path that the array "data" is never initialized. The path skips the checks in the conditional branches when either of callback functions, read_wave_vgprs and read_wave_sgprs, is not registered. Later, the uninitialized "data" array is read in the while-loop below and passed to put_user(). Fix the path by allocating the array with kcalloc(). The patch is simpler than adding a fall-back branch that explicitly calls memset(data, 0, ...). Also, it does not need the multiplication 1024*sizeof(*data) as the size parameter for memset(), though there is no risk of integer overflow. Signed-off-by: Wang Xiayang Reviewed-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6d54decef7f8..5652cc72ed3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, thread = (*pos & GENMASK_ULL(59, 52)) >> 52; bank = (*pos & GENMASK_ULL(61, 60)) >> 60; - data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); + data = kcalloc(1024, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; From a02709818f397e7ed7a0943d65a49d54b2752626 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:51:59 +0800 Subject: [PATCH 09/18] drm/amd/powerplay: add new sensor type for VCN powergate status VCN is widely used in new ASICs and different from traditional UVD and VCE.
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 9f661bf96ed0..5b1ebb7f995a 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -123,6 +123,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, AMDGPU_PP_SENSOR_MIN_FAN_RPM, AMDGPU_PP_SENSOR_MAX_FAN_RPM, + AMDGPU_PP_SENSOR_VCN_POWER_STATE, }; enum amd_pp_task { From 201cd702b7012ecee2a613e09b6a227ca0e12504 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:55:36 +0800 Subject: [PATCH 10/18] drm/amd/powerplay: support VCN powergate status retrieval on Raven Enable VCN powergate status report on Raven. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e32ae9d3373c..18e780f566fa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); uint32_t sclk, mclk; int ret = 0; @@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, case AMDGPU_PP_SENSOR_GPU_TEMP: *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr); break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1; + *size = 4; + break; default: ret = -EINVAL; break; @@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + if (bgate) { amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerDownVcn, 0); + smu10_data->vcn_power_gated = true; } else { smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PowerUpVcn, 0); amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); + smu10_data->vcn_power_gated = false; } } From e21e3581e2a1df75abb96b545be15e526bd8c1c6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 09:57:27 +0800 Subject: [PATCH 11/18] drm/amd/powerplay: support VCN powergate status retrieval for SW SMU Commonly used for VCN powergate status retrieval for SW SMU. Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 93cd969e5cf5..0685a3388e38 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -338,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 
1 : 0; *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_POWER_STATE: + *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0; + *size = 4; + break; default: ret = -EINVAL; break; From a3ebbdb95f8c343a547ee2abec4d8abbf71f8a94 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:27:21 +0800 Subject: [PATCH 12/18] drm/amd/powerplay: correct Navi10 VCN powergate control (v2) No VCN DPM bit check as that's different from VCN PG. Also no extra check for possible double enablement/disablement as that's already done by VCN. v2: check return value of smu_feature_set_enabled Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 28 ++++++++-------------- 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index be592d22bdcc..cc0a3b2256af 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -578,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable) { int ret = 0; - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - if (enable && power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); - if (ret) - return ret; - } - power_gate->uvd_gated = false; + if (enable) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1); + if (ret) + return ret; } else { - if (!enable && !power_gate->uvd_gated) { - if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) { - ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); - if (ret) - return ret; - } - power_gate->uvd_gated = true; - } + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; } - return 0; + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable); + + return ret; } static int navi10_get_current_clk_freq_by_table(struct smu_context *smu, From 6dee4829cfde106a8af7d0d3ba23022f8f054761 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Jul 2019 10:42:29 +0800 Subject: [PATCH 13/18] drm/amd/powerplay: correct UVD/VCE/VCN power status retrieval VCN should be used for Vega20 later ASICs while UVD and VCE are for previous ASICs. 
Signed-off-by: Evan Quan Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 56 +++++++++++++++++--------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 8c90baca07b2..2b546567853b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -3075,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); - /* UVD clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "UVD: Disabled\n"); - } else { - seq_printf(m, "UVD: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (DCLK)\n", value/100); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + if (adev->asic_type > CHIP_VEGA20) { + /* VCN clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCN: Disabled\n"); + } else { + seq_printf(m, "VCN: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } } - } - seq_printf(m, "\n"); + seq_printf(m, "\n"); + } else { + /* UVD clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "UVD: Disabled\n"); + } else { + seq_printf(m, "UVD: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (DCLK)\n", value/100); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (VCLK)\n", value/100); + } + } + seq_printf(m, "\n"); - /* VCE clocks */ - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { - if (!value) { - seq_printf(m, "VCE: Disabled\n"); - } else { - seq_printf(m, "VCE: Enabled\n"); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) - seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + /* VCE clocks */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { + if (!value) { + seq_printf(m, "VCE: Disabled\n"); + } else { + seq_printf(m, "VCE: Enabled\n"); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) + seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); + } } } From 3de433c5b38af49a5fc7602721e2ab5d39f1e69c Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Tue, 30 Jul 2019 14:46:28 -0700 Subject: [PATCH 14/18] drm/msm: Use the correct dma_sync calls in msm_gem [subject was: drm/msm: shake fist angrily at dma-mapping] So, using dma_sync_* for our cache needs works out w/ dma iommu ops, but it falls appart with dma direct ops. 
The problem is that, depending on display generation, we can have either set of dma ops (mdp4 and dpu have iommu wired to mdss node, which maps to toplevel drm device, but mdp5 has iommu wired up to the mdp sub-node within mdss). Fixes this splat on mdp5 devices: Unable to handle kernel paging request at virtual address ffffffff80000000 Mem abort info: ESR = 0x96000144 Exception class = DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000144 CM = 1, WnR = 1 swapper pgtable: 4k pages, 48-bit VAs, pgdp=00000000810e4000 [ffffffff80000000] pgd=0000000000000000 Internal error: Oops: 96000144 [#1] SMP Modules linked in: btqcomsmd btqca bluetooth cfg80211 ecdh_generic ecc rfkill libarc4 panel_simple msm wcnss_ctrl qrtr_smd drm_kms_helper venus_enc venus_dec videobuf2_dma_sg videobuf2_memops drm venus_core ipv6 qrtr qcom_wcnss_pil v4l2_mem2mem qcom_sysmon videobuf2_v4l2 qmi_helpers videobuf2_common crct10dif_ce mdt_loader qcom_common videodev qcom_glink_smem remoteproc bmc150_accel_i2c bmc150_magn_i2c bmc150_accel_core bmc150_magn snd_soc_lpass_apq8016 snd_soc_msm8916_analog mms114 mc nf_defrag_ipv6 snd_soc_lpass_cpu snd_soc_apq8016_sbc industrialio_triggered_buffer kfifo_buf snd_soc_lpass_platform snd_soc_msm8916_digital drm_panel_orientation_quirks CPU: 2 PID: 33 Comm: kworker/2:1 Not tainted 5.3.0-rc2 #1 Hardware name: Samsung Galaxy A5U (EUR) (DT) Workqueue: events deferred_probe_work_func pstate: 80000005 (Nzcv daif -PAN -UAO) pc : __clean_dcache_area_poc+0x20/0x38 lr : arch_sync_dma_for_device+0x28/0x30 sp : ffff0000115736a0 x29: ffff0000115736a0 x28: 0000000000000001 x27: ffff800074830800 x26: ffff000011478000 x25: 0000000000000000 x24: 0000000000000001 x23: ffff000011478a98 x22: ffff800009fd1c10 x21: 0000000000000001 x20: ffff800075ad0a00 x19: 0000000000000000 x18: ffff0000112b2000 x17: 0000000000000000 x16: 0000000000000000 x15: 00000000fffffff0 x14: ffff000011455d70 x13: 0000000000000000 x12: 0000000000000028 x11: 0000000000000001 x10: ffff00001106c000 x9 : ffff7e0001d6b380 x8 : 0000000000001000 x7 : ffff7e0001d6b380 x6 : ffff7e0001d6b382 x5 : 0000000000000000 x4 : 0000000000001000 x3 : 000000000000003f x2 : 0000000000000040 x1 : ffffffff80001000 x0 : ffffffff80000000 Call trace: __clean_dcache_area_poc+0x20/0x38 dma_direct_sync_sg_for_device+0xb8/0xe8 get_pages+0x22c/0x250 [msm] msm_gem_get_and_pin_iova+0xdc/0x168 [msm] ... Fixes the combination of two patches: Fixes: 0036bc73ccbe (drm/msm: stop abusing dma_map/unmap for cache) Fixes: 449fa54d6815 (dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device) Tested-by: Stephan Gerhold Signed-off-by: Rob Clark [seanpaul changed subject to something more desriptive] Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190730214633.17820-1-robdclark@gmail.com --- drivers/gpu/drm/msm/msm_gem.c | 47 +++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index c2114c748c2f..8cf6362e64bf 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj) return !msm_obj->vram_node; } +/* + * Cache sync.. this is a bit over-complicated, to fit dma-mapping + * API. Really GPU cache is out of scope here (handled on cmdstream) + * and all we need to do is invalidate newly allocated pages before + * mapping to CPU as uncached/writecombine. 
+ * + * On top of this, we have the added headache, that depending on + * display generation, the display's iommu may be wired up to either + * the toplevel drm device (mdss), or to the mdp sub-node, meaning + * that here we either have dma-direct or iommu ops. + * + * Let this be a cautionary tail of abstraction gone wrong. + */ + +static void sync_for_device(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_device(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_map_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + +static void sync_for_cpu(struct msm_gem_object *msm_obj) +{ + struct device *dev = msm_obj->base.dev->dev; + + if (get_dma_ops(dev)) { + dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sg(dev, msm_obj->sgt->sgl, + msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + } +} + /* allocate pages from VRAM carveout, used when no IOMMU: */ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages) { @@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj) * because display controller, GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, DMA_BIDIRECTIONAL); + sync_for_device(msm_obj); } return msm_obj->pages; @@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj) * GPU, etc. are not coherent: */ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED)) - dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl, - msm_obj->sgt->nents, - DMA_BIDIRECTIONAL); + sync_for_cpu(msm_obj); sg_free_table(msm_obj->sgt); kfree(msm_obj->sgt); From 2bab52af6fe68c43b327a57e5ce5fc10eefdfadf Mon Sep 17 00:00:00 2001 From: Brian Masney Date: Fri, 31 May 2019 05:46:15 -0400 Subject: [PATCH 15/18] drm/msm: add support for per-CRTC max_vblank_count on mdp5 The mdp5 drm/kms driver currently does not work on command-mode DSI panels due to 'vblank wait timed out' errors. This causes a latency of seconds, or tens of seconds in some cases, before content is shown on the panel. This hardware does not have anything that we can use as a frame counter when running in command mode, so we need to fall back to using timestamps by setting the max_vblank_count to zero. This can be done on a per-CRTC basis, so convert mdp5 to use drm_crtc_set_max_vblank_count(). This change was tested on an LG Nexus 5 (hammerhead) phone.
Suggested-by: Jeffrey Hugo Reviewed-by: Jeffrey Hugo Signed-off-by: Brian Masney Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190531094619.31704-3-masneyb@onstation.org --- drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 16 +++++++++++++++- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c index ff14555372d0..78d5fa230c16 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c @@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc, mdp5_crtc->enabled = false; } +static void mdp5_crtc_vblank_on(struct drm_crtc *crtc) +{ + struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state); + struct mdp5_interface *intf = mdp5_cstate->pipeline.intf; + u32 count; + + count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff; + drm_crtc_set_max_vblank_count(crtc, count); + + drm_crtc_vblank_on(crtc); +} + static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc, } /* Restore vblank irq handling after power is enabled */ - drm_crtc_vblank_on(crtc); + mdp5_crtc_vblank_on(crtc); mdp5_crtc_mode_set_nofb(crtc); @@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc) mdp5_crtc_destroy_state(crtc, crtc->state); __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base); + + drm_crtc_vblank_reset(crtc); } static const struct drm_crtc_funcs mdp5_crtc_funcs = { diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 4a60f5fca6b0..fec6ef1ae3b9 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos; dev->driver->get_scanout_position = mdp5_get_scanoutpos; dev->driver->get_vblank_counter = mdp5_get_vblank_counter; - dev->max_vblank_count = 0xffffffff; + dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */ dev->vblank_disable_immediate = true; return kms; From c14b5dce5ece48035cfd0aa951b39c69ad5056f4 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Thu, 25 Jul 2019 10:53:08 -0600 Subject: [PATCH 16/18] drm/msm: Annotate intentional switch statement fall throughs Explicitly mark intentional fall throughs in switch statements to keep -Wimplicit-fallthrough from complaining. 
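As a minimal illustration (hypothetical names, not driver code), the annotation is simply a comment placed where the missing break would otherwise be, in a form the compiler's fall-through comment matching accepts as intentional:

    /* cmd_type, already_restored, emit_commands and ring are illustrative only */
    switch (cmd_type) {
    case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
            if (already_restored)
                    break;
            /* fall-thru */
    case MSM_SUBMIT_CMD_BUF:
            emit_commands(ring);
            break;
    }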
Reviewed-by: Rob Clark Signed-off-by: Jordan Crouse Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1564073588-27386-1-git-send-email-jcrouse@codeaurora.org --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 ++ drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 1 + drivers/gpu/drm/msm/adreno/adreno_gpu.c | 1 + 3 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 1671db47aa57..e9c55d1d6c04 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: /* copy commands into RB: */ obj = submit->bos[submit->cmd[i].idx].obj; @@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index be39cf01e51e..dc8ec2c94301 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, case MSM_SUBMIT_CMD_CTX_RESTORE_BUF: if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3); OUT_RING(ring, lower_32_bits(submit->cmd[i].iova)); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 9acbbc0f3232..048c8be426f3 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit, /* ignore if there has not been a ctx switch: */ if (priv->lastctx == ctx) break; + /* fall-thru */ case MSM_SUBMIT_CMD_BUF: OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ? CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2); From 9ca7ad6c7706edeae331c1632d0c63897418ebad Mon Sep 17 00:00:00 2001 From: Jeffrey Hugo Date: Wed, 26 Jun 2019 11:00:15 -0700 Subject: [PATCH 17/18] drm: msm: Fix add_gpu_components add_gpu_components() adds found GPU nodes from the DT to the match list, regardless of the status of the nodes. This is a problem because if the nodes are disabled, they should not be on the match list, as they will not be matched. This prevents the display from initializing if a GPU node is defined but its status is disabled. Fix this by checking the node's status before adding it to the match list.
Fixes: dc3ea265b856 (drm/msm: Drop the gpu binding) Reviewed-by: Rob Clark Signed-off-by: Jeffrey Hugo Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20190626180015.45242-1-jeffrey.l.hugo@gmail.com --- drivers/gpu/drm/msm/msm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index c226156f2dea..c356f5ccf253 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev, if (!np) return 0; - drm_of_component_match_add(dev, matchptr, compare_of, np); + if (of_device_is_available(np)) + drm_of_component_match_add(dev, matchptr, compare_of, np); of_node_put(np); From 412e85b605315fd129a849599cf4a5a7959573a8 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Thu, 1 Aug 2019 18:02:15 -0400 Subject: [PATCH 18/18] drm/nouveau: Only release VCPI slots on mode changes Looks like a regression got introduced into nv50_mstc_atomic_check() that somehow didn't get found until now. If userspace changes crtc_state->active to false but leaves the CRTC enabled, we end up calling drm_dp_atomic_find_vcpi_slots() using the PBN calculated in asyh->dp.pbn. However, if the display is inactive we end up calculating a PBN of 0, which inadvertently causes us to have an allocation of 0. >From there, if userspace then disables the CRTC afterwards we end up accidentally attempting to free the VCPI twice: WARNING: CPU: 0 PID: 1484 at drivers/gpu/drm/drm_dp_mst_topology.c:3336 drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] RIP: 0010:drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] Call Trace: drm_atomic_helper_check_modeset+0x3f3/0xa60 [drm_kms_helper] ? drm_atomic_check_only+0x43/0x780 [drm] drm_atomic_helper_check+0x15/0x90 [drm_kms_helper] nv50_disp_atomic_check+0x83/0x1d0 [nouveau] drm_atomic_check_only+0x54d/0x780 [drm] ? drm_atomic_set_crtc_for_connector+0xec/0x100 [drm] drm_atomic_commit+0x13/0x50 [drm] drm_atomic_helper_set_config+0x81/0x90 [drm_kms_helper] drm_mode_setcrtc+0x194/0x6a0 [drm] ? vprintk_emit+0x16a/0x230 ? drm_ioctl+0x163/0x390 [drm] ? drm_mode_getcrtc+0x180/0x180 [drm] drm_ioctl_kernel+0xaa/0xf0 [drm] drm_ioctl+0x208/0x390 [drm] ? drm_mode_getcrtc+0x180/0x180 [drm] nouveau_drm_ioctl+0x63/0xb0 [nouveau] do_vfs_ioctl+0x405/0x660 ? recalc_sigpending+0x17/0x50 ? _copy_from_user+0x37/0x60 ksys_ioctl+0x5e/0x90 ? exit_to_usermode_loop+0x92/0xe0 __x64_sys_ioctl+0x16/0x20 do_syscall_64+0x59/0x190 entry_SYSCALL_64_after_hwframe+0x44/0xa9 WARNING: CPU: 0 PID: 1484 at drivers/gpu/drm/drm_dp_mst_topology.c:3336 drm_dp_atomic_release_vcpi_slots+0x87/0xb0 [drm_kms_helper] ---[ end trace 4c395c0c51b1f88d ]--- [drm:drm_dp_atomic_release_vcpi_slots [drm_kms_helper]] *ERROR* no VCPI for [MST PORT:00000000e288eb7d] found in mst state 000000008e642070 So, fix this by doing what we probably should have done from the start: only call drm_dp_atomic_find_vcpi_slots() when crtc_state->mode_changed is set, so that VCPI allocations remain for as long as the CRTC is enabled. 
Signed-off-by: Lyude Paul Fixes: 232c9eec417a ("drm/nouveau: Use atomic VCPI helpers for MST") Cc: Lyude Paul Cc: Ben Skeggs Cc: Daniel Vetter Cc: David Airlie Cc: Jerry Zuo Cc: Harry Wentland Cc: Juston Li Cc: Karol Herbst Cc: Laurent Pinchart Cc: Ilia Mirkin Cc: # v5.1+ Acked-by: Ben Skeggs Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20190801220216.15323-1-lyude@redhat.com --- drivers/gpu/drm/nouveau/dispnv50/disp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 8497768f1b41..126703816794 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, connector->display_info.bpc * 3); - if (drm_atomic_crtc_needs_modeset(crtc_state)) { + if (crtc_state->mode_changed) { slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn);