
drm/amdgpu: support dpm level modification under virtualization v3

Under vega10 virtualization, the SMU IP block will not be added.
Therefore, we need to add pp clk query and force dpm level functions
to amdgpu_virt_ops to support the feature.

v2: add get_pp_clk existence check and use kzalloc to allocate buf

v3: return -ENOMEM for allocation failure and correct the coding style

Signed-off-by: Yintian Tao <yttao@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Yintian Tao 2019-04-09 20:33:20 +08:00 committed by Alex Deucher
parent b0960c3592
commit bb5a2bdf36
7 changed files with 165 additions and 0 deletions
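For context, a minimal userspace sketch (not part of the patch) of how the feature is exercised through amdgpu's existing sysfs files once the VF advertises HW performance simulation; the card0 path and the "high" level are illustrative examples:

/*
 * Read the SCLK DPM table and force a performance level through sysfs.
 * Under SR-IOV these now go through adev->virt.ops->get_pp_clk() and
 * adev->virt.ops->force_dpm_level() respectively.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f;

	f = fopen("/sys/class/drm/card0/device/pp_dpm_sclk", "r");
	if (!f) {
		perror("pp_dpm_sclk");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);

	f = fopen("/sys/class/drm/card0/device/power_dpm_force_performance_level", "w");
	if (!f) {
		perror("power_dpm_force_performance_level");
		return 1;
	}
	fputs("high", f);
	fclose(f);
	return 0;
}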

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2471,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);

drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -696,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
} else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
adev->virt.ops->get_pp_clk) {
dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
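The dev_info values filled in here reach userspace through the AMDGPU_INFO ioctl; a hedged sketch (assuming libdrm's amdgpu wrapper and an example render-node path) of reading them back as max_engine_clk / max_memory_clk in kHz:

/* Build against libdrm_amdgpu; the renderD128 node is an example. */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <amdgpu.h>

int main(void)
{
	uint32_t major, minor;
	amdgpu_device_handle dev;
	struct amdgpu_gpu_info info;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0 || amdgpu_device_initialize(fd, &major, &minor, &dev))
		return 1;

	/* max_engine_clk/max_memory_clk are the kHz values from dev_info. */
	if (!amdgpu_query_gpu_info(dev, &info))
		printf("max sclk %llu kHz, max mclk %llu kHz\n",
		       (unsigned long long)info.max_engine_clk,
		       (unsigned long long)info.max_memory_clk);

	amdgpu_device_deinitialize(dev);
	close(fd);
	return 0;
}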

drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c

@@ -327,6 +327,18 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
if (amdgpu_sriov_vf(adev)) {
if (amdgim_is_hwperf(adev) &&
adev->virt.ops->force_dpm_level) {
mutex_lock(&adev->pm.mutex);
adev->virt.ops->force_dpm_level(adev, level);
mutex_unlock(&adev->pm.mutex);
return count;
} else {
return -EINVAL;
}
}
if (current_level == level)
return count;
@@ -790,6 +802,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
adev->virt.ops->get_pp_clk)
return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
if (is_support_sw_smu(adev))
return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
else if (adev->powerplay.pp_funcs->print_clock_levels)

drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -375,4 +375,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
static uint32_t parse_clk(char *buf, bool min)
{
char *ptr = buf;
uint32_t clk = 0;
do {
ptr = strchr(ptr, ':');
if (!ptr)
break;
ptr+=2;
clk = simple_strtoul(ptr, NULL, 10);
} while (!min);
return clk * 100;
}
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
{
char *buf = NULL;
uint32_t clk = 0;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
clk = parse_clk(buf, lowest);
kfree(buf);
return clk;
}
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
{
char *buf = NULL;
uint32_t clk = 0;
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
return -ENOMEM;
adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
clk = parse_clk(buf, lowest);
kfree(buf);
return clk;
}
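parse_clk() assumes the host replies with the usual "index: NNNMhz [*]" clock table (the same text pp_dpm_sclk prints) and converts the chosen entry to the driver's 10 kHz units: the first entry when the lowest clock is requested, the last entry otherwise. A self-contained userspace sketch of the same parsing, with an assumed example table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Userspace re-implementation of parse_clk() above, for illustration. */
static uint32_t parse_clk(const char *buf, int min)
{
	const char *ptr = buf;
	uint32_t clk = 0;

	do {
		ptr = strchr(ptr, ':');
		if (!ptr)
			break;
		ptr += 2;		/* skip ": " to the number */
		clk = strtoul(ptr, NULL, 10);
	} while (!min);			/* keep only the first entry for min */

	return clk * 100;		/* MHz -> 10 kHz units */
}

int main(void)
{
	/* Assumed example reply in the pp_dpm_sclk format. */
	const char *table = "0: 852Mhz\n1: 991Mhz *\n2: 1138Mhz\n";

	printf("lowest  sclk: %u\n", parse_clk(table, 1));	/* 85200 */
	printf("highest sclk: %u\n", parse_clk(table, 0));	/* 113800 */
	return 0;
}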

drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
/* HW PERF SIM in GIM */
AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
/* protect DPM events to GIM */
struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
#define amdgim_is_hwperf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif

drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c

@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
int r = 0;
u32 req, val, size;
if (!amdgim_is_hwperf(adev) || buf == NULL)
return -EBADRQC;
switch(type) {
case PP_SCLK:
req = IDH_IRQ_GET_PP_SCLK;
break;
case PP_MCLK:
req = IDH_IRQ_GET_PP_MCLK;
break;
default:
return -EBADRQC;
}
mutex_lock(&adev->virt.dpm_mutex);
xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
if (!r && adev->fw_vram_usage.va != NULL) {
val = RREG32_NO_KIQ(
SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
val), PAGE_SIZE);
if (size < PAGE_SIZE)
strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
else
size = 0;
r = size;
goto out;
}
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
if(r)
pr_info("%s DPM request failed",
(type == PP_SCLK)? "SCLK" : "MCLK");
out:
mutex_unlock(&adev->virt.dpm_mutex);
return r;
}
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
int r = 0;
u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
if (!amdgim_is_hwperf(adev))
return -EBADRQC;
mutex_lock(&adev->virt.dpm_mutex);
xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
if (!r)
goto out;
r = xgpu_ai_poll_msg(adev, IDH_FAIL);
if (!r)
pr_info("DPM request failed");
else
pr_info("Mailbox is broken");
out:
mutex_unlock(&adev->virt.dpm_mutex);
return r;
}
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.get_pp_clk = xgpu_ai_get_pp_clk,
.force_dpm_level = xgpu_ai_force_dpm_level,
};

drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h

@@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
IDH_IRQ_FORCE_DPM_LEVEL = 10,
IDH_IRQ_GET_PP_SCLK,
IDH_IRQ_GET_PP_MCLK,
IDH_LOG_VF_ERROR = 200,
};
@@ -43,6 +47,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
IDH_SUCCESS,
IDH_FAIL,
IDH_EVENT_MAX
};