
drm/amdgpu: add optional fence out-parameter to amdgpu_vm_clear_freed

We will add the fence to freed buffer objects in a later commit, to ensure
that the underlying memory can only be re-used after all references in
page tables have been cleared.
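
For illustration only, a minimal sketch of how a caller might consume the new
out-parameter. Blocking with dma_fence_wait() is just one possible use (the
later commit instead attaches the fence to the freed BOs), and the adev/vm
variables are assumed to be in scope:

	struct dma_fence *fence = NULL;
	int r;

	/* Collect a fence covering the page-table clears for this VM. */
	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r)
		return r;

	if (fence) {
		/* Wait for the clears to execute before the backing memory
		 * of the freed mappings is handed out again. */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}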

Signed-off-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Nicolai Hähnle 2017-03-23 19:36:31 +01:00 committed by Alex Deucher
parent 923d26db85
commit f34678187a
4 changed files with 20 additions and 10 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -785,7 +785,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
-	r = amdgpu_vm_clear_freed(adev, vm);
+	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;

drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -540,7 +540,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error;
-	r = amdgpu_vm_clear_freed(adev, vm);
+	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		goto error;

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -1382,6 +1382,8 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @fence: optional resulting fence (unchanged if no work needed to be done
+ * or if an error occurred)
  *
  * Make sure all freed BOs are cleared in the PT.
  * Returns 0 for success.
@@ -1389,10 +1391,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  * PTs have to be reserved and mutex must be locked!
  */
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm)
+			  struct amdgpu_vm *vm,
+			  struct dma_fence **fence)
 {
 	struct amdgpu_bo_va_mapping *mapping;
-	struct dma_fence *fence = NULL;
+	struct dma_fence *f = NULL;
 	int r;
 	while (!list_empty(&vm->freed)) {
@@ -1401,15 +1404,21 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
 		list_del(&mapping->list);
 		r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
-					       0, 0, &fence);
-		amdgpu_vm_free_mapping(adev, vm, mapping, fence);
+					       0, 0, &f);
+		amdgpu_vm_free_mapping(adev, vm, mapping, f);
 		if (r) {
-			dma_fence_put(fence);
+			dma_fence_put(f);
 			return r;
 		}
 	}
-	dma_fence_put(fence);
+	if (fence && f) {
+		dma_fence_put(*fence);
+		*fence = f;
+	} else {
+		dma_fence_put(f);
+	}
 	return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -190,7 +190,8 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
+			  struct amdgpu_vm *vm,
+			  struct dma_fence **fence);
 int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			     struct amdgpu_sync *sync);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,