drm/sched: add drm_sched_start_timeout helper

Clean up starting the timeout a bit by moving the duplicated code into a new drm_sched_start_timeout() helper.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nayan Deshmukh <nayan26deshmukh@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 2018-10-05 19:56:39 +02:00 committed by Alex Deucher
parent 7a862028b9
commit b981c86f03
1 changed file with 17 additions and 12 deletions

@@ -182,6 +182,20 @@ bool drm_sched_dependency_optimized(struct dma_fence* fence,
 }
 EXPORT_SYMBOL(drm_sched_dependency_optimized);
 
+/**
+ * drm_sched_start_timeout - start timeout for reset worker
+ *
+ * @sched: scheduler instance to start the worker for
+ *
+ * Start the timeout for the given scheduler.
+ */
+static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
+{
+	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+	    !list_empty(&sched->ring_mirror_list))
+		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+}
+
 /* job_finish is called after hw fence signaled
  */
 static void drm_sched_job_finish(struct work_struct *work)
@@ -203,9 +217,7 @@ static void drm_sched_job_finish(struct work_struct *work)
 	/* remove job from ring_mirror_list */
 	list_del(&s_job->node);
 	/* queue TDR for next job */
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    !list_empty(&sched->ring_mirror_list))
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 
 	dma_fence_put(&s_job->s_fence->finished);
@@ -229,10 +241,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job)
 
 	spin_lock(&sched->job_list_lock);
 	list_add_tail(&s_job->node, &sched->ring_mirror_list);
-	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-	    list_first_entry_or_null(&sched->ring_mirror_list,
-				     struct drm_sched_job, node) == s_job)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 
@@ -313,11 +322,6 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 	int r;
 
 	spin_lock(&sched->job_list_lock);
-	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
-					 struct drm_sched_job, node);
-	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
-		schedule_delayed_work(&sched->work_tdr, sched->timeout);
-
 	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 		struct drm_sched_fence *s_fence = s_job->s_fence;
 		struct dma_fence *fence;
@@ -350,6 +354,7 @@ void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
 		}
 		spin_lock(&sched->job_list_lock);
 	}
+	drm_sched_start_timeout(sched);
 	spin_unlock(&sched->job_list_lock);
 }
 EXPORT_SYMBOL(drm_sched_job_recovery);
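
For readers skimming the diff: after this change all three call sites (drm_sched_job_finish(), drm_sched_job_begin() and drm_sched_job_recovery()) arm the TDR work through the same helper, and each does so while holding sched->job_list_lock, so the list_empty() check and the schedule_delayed_work() call cannot race with list updates. Below is a minimal userspace sketch of the guard pattern the helper centralizes. Everything in it is illustrative: the toy_* names are hypothetical, printf() stands in for schedule_delayed_work(), a job counter stands in for ring_mirror_list, and the sentinel mimics the kernel's MAX_SCHEDULE_TIMEOUT (which is LONG_MAX in linux/sched.h).

#include <stdio.h>
#include <limits.h>

#define TOY_MAX_SCHEDULE_TIMEOUT LONG_MAX /* mirrors the kernel's MAX_SCHEDULE_TIMEOUT */

struct toy_sched {
	long timeout;     /* TDR delay; TOY_MAX_SCHEDULE_TIMEOUT disables it */
	int pending_jobs; /* stand-in for !list_empty(&sched->ring_mirror_list) */
};

/* Centralized guard, like drm_sched_start_timeout(): only (re)arm the
 * timeout when TDR is enabled and at least one job is still in flight. */
static void toy_sched_start_timeout(struct toy_sched *sched)
{
	if (sched->timeout != TOY_MAX_SCHEDULE_TIMEOUT && sched->pending_jobs > 0)
		printf("arm TDR: %ld jiffies\n", sched->timeout); /* schedule_delayed_work() */
	else
		printf("TDR stays disarmed\n");
}

static void toy_sched_job_begin(struct toy_sched *sched)
{
	sched->pending_jobs++;          /* list_add_tail(&s_job->node, ...) */
	toy_sched_start_timeout(sched); /* was an open-coded check per call site */
}

static void toy_sched_job_finish(struct toy_sched *sched)
{
	sched->pending_jobs--;          /* list_del(&s_job->node) */
	toy_sched_start_timeout(sched); /* queue TDR for the next job, if any */
}

int main(void)
{
	struct toy_sched sched = { .timeout = 100, .pending_jobs = 0 };

	toy_sched_job_begin(&sched);  /* arms the timeout */
	toy_sched_job_begin(&sched);  /* re-arms; two jobs in flight */
	toy_sched_job_finish(&sched); /* one job left: re-arm */
	toy_sched_job_finish(&sched); /* list empty: stays disarmed */
	return 0;
}

The design point of the patch is simply that this condition was open-coded, with slight variations, in three places (job_begin additionally compared against the first list entry, recovery checked the first entry for NULL); routing everything through one helper makes the arming policy uniform, and the recovery path now also re-arms the timeout via the new call at the end of drm_sched_job_recovery().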