
drm/amdgpu: XGMI pstate switch initial support

The driver votes for a low-to-high pstate switch whenever there is an
outstanding XGMI mapping request, and votes for a high-to-low switch
once all outstanding XGMI mappings are terminated.

Signed-off-by: shaoyunl <shaoyun.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
shaoyunl 2019-03-20 16:14:56 -04:00 committed by Alex Deucher
parent adc7e863f6
commit df399b0641
6 changed files with 56 additions and 1 deletion
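The scheme the patch implements is a plain reference count: the first outstanding XGMI mapping votes the link from low to high pstate, and removing the last mapping votes it back down. A minimal userspace model of that pattern, with hypothetical names standing in for the driver's adev->vm_manager fields (a sketch for illustration, not the driver code):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the state this patch adds to struct amdgpu_vm_manager. */
struct vm_manager_model {
	uint32_t xgmi_map_counter;   /* outstanding XGMI mappings */
	pthread_mutex_t lock_pstate; /* serializes counter and pstate votes */
};

/* Stand-in for amdgpu_xgmi_set_pstate(adev, pstate). */
static void vote_pstate(int pstate)
{
	printf("vote pstate -> %s\n", pstate ? "high" : "low");
}

/* Mirrors the amdgpu_vm_bo_add() path: the first mapping votes high. */
static void xgmi_map(struct vm_manager_model *m)
{
	pthread_mutex_lock(&m->lock_pstate);
	if (++m->xgmi_map_counter == 1)
		vote_pstate(1);
	pthread_mutex_unlock(&m->lock_pstate);
}

/* Mirrors the amdgpu_vm_bo_rmv() path: the last unmap votes low. */
static void xgmi_unmap(struct vm_manager_model *m)
{
	pthread_mutex_lock(&m->lock_pstate);
	if (--m->xgmi_map_counter == 0)
		vote_pstate(0);
	pthread_mutex_unlock(&m->lock_pstate);
}

int main(void)
{
	struct vm_manager_model m = {
		.xgmi_map_counter = 0,
		.lock_pstate = PTHREAD_MUTEX_INITIALIZER,
	};

	xgmi_map(&m);   /* 0 -> 1: votes high */
	xgmi_map(&m);   /* 1 -> 2: no vote */
	xgmi_unmap(&m); /* 2 -> 1: no vote */
	xgmi_unmap(&m); /* 1 -> 0: votes low */
	return 0;
}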

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -2018,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 	r = amdgpu_device_enable_mgpu_fan_boost();
 	if (r)
 		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+	/*set to low pstate by default */
+	amdgpu_xgmi_set_pstate(adev, 0);
+
 }
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)

drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -72,6 +72,8 @@ struct amdgpu_bo_va {
 	/* If the mappings are cleared or filled */
 	bool				cleared;
+
+	bool				is_xgmi;
 };
 
 struct amdgpu_bo {

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -34,6 +34,7 @@
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
 
 /**
  * DOC: GPUVM
@@ -2045,6 +2046,15 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
 	INIT_LIST_HEAD(&bo_va->valids);
 	INIT_LIST_HEAD(&bo_va->invalids);
 
+	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev))) {
+		bo_va->is_xgmi = true;
+		mutex_lock(&adev->vm_manager.lock_pstate);
+		/* Power up XGMI if it can be potentially used */
+		if (++adev->vm_manager.xgmi_map_counter == 1)
+			amdgpu_xgmi_set_pstate(adev, 1);
+		mutex_unlock(&adev->vm_manager.lock_pstate);
+	}
+
 	return bo_va;
 }
@@ -2463,6 +2473,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	}
 
 	dma_fence_put(bo_va->last_pt_update);
+
+	if (bo && bo_va->is_xgmi) {
+		mutex_lock(&adev->vm_manager.lock_pstate);
+		if (--adev->vm_manager.xgmi_map_counter == 0)
+			amdgpu_xgmi_set_pstate(adev, 0);
+		mutex_unlock(&adev->vm_manager.lock_pstate);
+	}
+
 	kfree(bo_va);
 }
@@ -2970,6 +2988,9 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	idr_init(&adev->vm_manager.pasid_idr);
 	spin_lock_init(&adev->vm_manager.pasid_lock);
+
+	adev->vm_manager.xgmi_map_counter = 0;
+	mutex_init(&adev->vm_manager.lock_pstate);
 }
 
 /**

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -324,6 +324,10 @@ struct amdgpu_vm_manager {
 	 */
 	struct idr				pasid_idr;
 	spinlock_t				pasid_lock;
+
+	/* counter of mapped memory through xgmi */
+	uint32_t				xgmi_map_counter;
+	struct mutex				lock_pstate;
 };
 
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))

drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c

@@ -200,12 +200,26 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
 	if (lock)
 		mutex_lock(&tmp->hive_lock);
-
+	tmp->pstate = -1;
 	mutex_unlock(&xgmi_mutex);
 
 	return tmp;
 }
 
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+	int ret = 0;
+	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+	if (!hive)
+		return 0;
+
+	if (hive->pstate == pstate)
+		return 0;
+
+	/* Todo : send the message to SMU for pstate change */
+	return ret;
+}
+
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
 {
 	int ret = -EINVAL;
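Note that amdgpu_xgmi_set_pstate() lands here as a stub: it only dedupes requests against the cached hive->pstate, and the actual SMU message is left as a TODO. A hedged userspace sketch of the dedupe-and-cache behavior the TODO implies (hypothetical model types, not the driver's):

#include <stdio.h>

/* Model of the per-hive cache added in amdgpu_xgmi.h:
 * 0 -- low, 1 -- high, -1 -- unknown (never set). */
struct hive_model {
	int pstate;
};

/* Assumed follow-up behavior: message the SMU, then update the cache
 * on success so repeated identical votes stay no-ops. */
static int set_pstate_model(struct hive_model *hive, int pstate)
{
	if (hive->pstate == pstate)
		return 0; /* already at the requested pstate */

	printf("SMU message: pstate %d -> %d\n", hive->pstate, pstate);
	hive->pstate = pstate;
	return 0;
}

int main(void)
{
	struct hive_model hive = { .pstate = -1 };

	set_pstate_model(&hive, 1); /* -1 -> 1: sends */
	set_pstate_model(&hive, 1); /* deduped: no-op */
	set_pstate_model(&hive, 0); /* 1 -> 0: sends */
	return 0;
}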

drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h

@@ -33,11 +33,21 @@ struct amdgpu_hive_info {
 	struct kobject *kobj;
 	struct device_attribute dev_attr;
 	struct amdgpu_device *adev;
+	int pstate; /* 0 -- low, 1 -- high, -1 -- unknown */
 };
 
 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
 int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
 void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+					 struct amdgpu_device *bo_adev)
+{
+	return (adev != bo_adev &&
+		adev->gmc.xgmi.hive_id &&
+		adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
 
 #endif
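amdgpu_xgmi_same_hive() is a pure predicate: true only for two distinct devices whose non-zero hive_id values match. A small standalone test of that logic, using a stand-in struct rather than the driver types:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the gmc.xgmi.hive_id field the predicate reads. */
struct dev_model {
	uint64_t hive_id;
};

/* Mirrors amdgpu_xgmi_same_hive(): different device, same non-zero hive. */
static int same_hive(const struct dev_model *a, const struct dev_model *b)
{
	return a != b && a->hive_id && a->hive_id == b->hive_id;
}

int main(void)
{
	struct dev_model d0 = { .hive_id = 0x42 };
	struct dev_model d1 = { .hive_id = 0x42 };
	struct dev_model d2 = { .hive_id = 0 };
	struct dev_model d3 = { .hive_id = 0 };

	assert(same_hive(&d0, &d1));  /* peers in the same hive */
	assert(!same_hive(&d0, &d0)); /* a device is not its own peer */
	assert(!same_hive(&d2, &d3)); /* hive_id 0 means no hive at all */
	return 0;
}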