1
0
Fork 0

drm/amdgpu: use kernel fence directly in amdgpu_bo_fence

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <jammy.zhou@amd.com>
hifive-unleashed-5.1
Chunming Zhou 2015-08-03 11:38:09 +08:00 committed by Alex Deucher
parent 1d7dd229f5
commit e40a31159b
3 changed files with 8 additions and 8 deletions

View File

@ -658,13 +658,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
* @shared: true if fence should be added shared
*
*/
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared)
{
struct reservation_object *resv = bo->tbo.resv;
if (shared)
reservation_object_add_shared_fence(resv, &fence->base);
reservation_object_add_shared_fence(resv, fence);
else
reservation_object_add_excl_fence(resv, &fence->base);
reservation_object_add_excl_fence(resv, fence);
}

View File

@ -161,7 +161,7 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
bool shared);
/*

View File

@ -320,7 +320,7 @@ static int amdgpu_vm_run_job(
struct amdgpu_cs_parser *sched_job)
{
amdgpu_bo_fence(sched_job->job_param.vm.bo,
sched_job->ibs[sched_job->num_ibs -1].fence, true);
&sched_job->ibs[sched_job->num_ibs -1].fence->base, true);
return 0;
}
@ -397,7 +397,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = amdgpu_ib_schedule(adev, 1, ib, AMDGPU_FENCE_OWNER_VM);
if (r)
goto error_free;
amdgpu_bo_fence(bo, ib->fence, true);
amdgpu_bo_fence(bo, &ib->fence->base, true);
}
error_free:
@ -547,7 +547,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
amdgpu_ib_free(adev, ib);
return r;
}
amdgpu_bo_fence(pd, ib->fence, true);
amdgpu_bo_fence(pd, &ib->fence->base, true);
}
}
@ -745,7 +745,7 @@ static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
end >>= amdgpu_vm_block_size;
for (i = start; i <= end; ++i)
amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
amdgpu_bo_fence(vm->page_tables[i].bo, &fence->base, true);
}
static int amdgpu_vm_bo_update_mapping_run_job(