
drm/amdgpu: block scheduler when gpu reset

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Chunming Zhou 2016-06-12 15:41:58 +08:00 committed by Alex Deucher
parent eafc9c2704
commit 0875dc9e80
2 changed files with 27 additions and 5 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -1895,6 +1896,14 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 
 	atomic_inc(&adev->gpu_reset_counter);
 
+	/* block scheduler */
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		struct amdgpu_ring *ring = adev->rings[i];
+		if (!ring)
+			continue;
+		kthread_park(ring->sched.thread);
+	}
+
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -1928,7 +1937,7 @@ retry:
 			struct amdgpu_ring *ring = adev->rings[i];
 			if (!ring)
 				continue;
-
+			kthread_unpark(ring->sched.thread);
 			amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
 			ring_sizes[i] = 0;
 			ring_data[i] = NULL;
@@ -1946,8 +1955,10 @@ retry:
 	} else {
 		amdgpu_fence_driver_force_completion(adev);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			if (adev->rings[i])
+			if (adev->rings[i]) {
+				kthread_unpark(adev->rings[i]->sched.thread);
 				kfree(ring_data[i]);
+			}
 		}
 	}
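
The device-side hunks above bracket the reset: every ring's scheduler kthread is parked before the hardware is torn down, and unparked on both the success and failure paths once submission is safe again. Below is a minimal, self-contained sketch (not part of the patch) of the same kthread park/unpark handshake; the module name, worker_fn, and all other identifiers are hypothetical illustrations of the pattern.

/*
 * Illustrative toy module showing the park/unpark handshake the
 * reset path relies on. Names (park_demo_*, worker_fn) are made up.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Cooperate with kthread_park(): block here until unparked. */
		if (kthread_should_park())
			kthread_parkme();
		/* ... fetch and submit work, as amd_sched_main does ... */
		msleep(100);
	}
	return 0;
}

static int __init park_demo_init(void)
{
	worker = kthread_run(worker_fn, NULL, "park-demo");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	kthread_park(worker);	/* returns only once worker_fn has parked */
	/* the worker is guaranteed not to run here, e.g. during a reset */
	kthread_unpark(worker);	/* let the worker resume its loop */
	return 0;
}

static void __exit park_demo_exit(void)
{
	kthread_stop(worker);
}

module_init(park_demo_init);
module_exit(park_demo_exit);
MODULE_LICENSE("GPL");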

drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -476,6 +476,16 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
 	wake_up_interruptible(&sched->wake_up_worker);
 }
 
+static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
+{
+	if (kthread_should_park()) {
+		kthread_parkme();
+		return true;
+	}
+
+	return false;
+}
+
 static int amd_sched_main(void *param)
 {
 	struct sched_param sparam = {.sched_priority = 1};
@@ -485,14 +495,15 @@ static int amd_sched_main(void *param)
 	sched_setscheduler(current, SCHED_FIFO, &sparam);
 
 	while (!kthread_should_stop()) {
-		struct amd_sched_entity *entity;
+		struct amd_sched_entity *entity = NULL;
 		struct amd_sched_fence *s_fence;
 		struct amd_sched_job *sched_job;
 		struct fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
-			(entity = amd_sched_select_entity(sched)) ||
-			kthread_should_stop());
+					 (!amd_sched_blocked(sched) &&
+					  (entity = amd_sched_select_entity(sched))) ||
+					 kthread_should_stop());
 
 		if (!entity)
 			continue;
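
Two details in the scheduler-side change are easy to miss. First, entity must now start out NULL: with the reworked wait condition, a parked thread short-circuits at !amd_sched_blocked(sched) and never reaches the entity = amd_sched_select_entity(sched) assignment, so the if (!entity) continue; check below would otherwise test an uninitialized pointer. Second, because amd_sched_blocked() is evaluated first, a thread asked to park goes to sleep inside kthread_parkme() while checking the wait condition, and only after kthread_unpark() does it re-evaluate the condition and select new work.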