From b43a9a7e87d2bbb8d0c6ae4ff06dcc604f00e31a Mon Sep 17 00:00:00 2001
From: Chunming Zhou
Date: Tue, 21 Jul 2015 15:13:53 +0800
Subject: [PATCH] drm/amdgpu: use scheduler user seq instead of previous user seq

Signed-off-by: Chunming Zhou
Acked-by: Christian König
Reviewed-by: Jammy Zhou
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 26 +++++++++++++++++++------
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 557fb60f416b..b9be250cb206 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -225,10 +225,16 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	uint64_t seq = cring->sequence;
-	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
-	struct fence *other = cring->fences[idx];
+	uint64_t seq = 0;
+	unsigned idx = 0;
+	struct fence *other = NULL;
 
+	if (amdgpu_enable_scheduler)
+		seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
+	else
+		seq = cring->sequence;
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
 	if (other) {
 		signed long r;
 		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
@@ -240,7 +246,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
-	cring->sequence++;
+	if (!amdgpu_enable_scheduler)
+		cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 
 	fence_put(other);
@@ -253,14 +260,21 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
+	uint64_t queued_seq;
 
 	spin_lock(&ctx->ring_lock);
-	if (seq >= cring->sequence) {
+	if (amdgpu_enable_scheduler)
+		queued_seq = atomic64_read(&cring->c_entity.last_queued_v_seq) + 1;
+	else
+		queued_seq = cring->sequence;
+
+	if (seq >= queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return ERR_PTR(-EINVAL);
 	}
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
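
Note (not part of the patch): the diff switches the "newest user sequence" bound from the software counter cring->sequence to the scheduler entity's last_queued_v_seq whenever amdgpu_enable_scheduler is set. The "+ 1" in amdgpu_ctx_get_fence() suggests that last_queued_v_seq holds the sequence most recently handed out, while cring->sequence is the next one to be handed out, so both paths end up being used as an exclusive upper bound. The standalone C sketch below models only that bounds check under those assumptions; the struct, the MAX_CS_PENDING value and the helper names are simplified stand-ins for illustration, not the real amdgpu types.

/*
 * Standalone sketch, NOT part of the patch: it models only the user-seq
 * bounds check that amdgpu_ctx_get_fence() performs after this change.
 * The struct, the MAX_CS_PENDING value and the helper names below are
 * simplified stand-ins for illustration, not the real amdgpu types.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CS_PENDING 16	/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

static bool scheduler_enabled = true;	/* models amdgpu_enable_scheduler */

struct ctx_ring_model {
	uint64_t sequence;		  /* next seq, non-scheduler path */
	atomic_uint_fast64_t last_queued; /* last seq queued to the scheduler
					     entity (last_queued_v_seq) */
};

/*
 * Returns 0 if seq refers to a fence that can still be looked up,
 * -1 if it was never issued (EINVAL in the kernel code), and
 * 1 if it is too old and its slot has been recycled (NULL fence).
 */
static int check_user_seq(struct ctx_ring_model *ring, uint64_t seq)
{
	uint64_t queued_seq;

	if (scheduler_enabled)
		queued_seq = atomic_load(&ring->last_queued) + 1;
	else
		queued_seq = ring->sequence;

	if (seq >= queued_seq)
		return -1;
	if (seq + MAX_CS_PENDING < queued_seq)
		return 1;
	return 0;			/* fences[seq % MAX_CS_PENDING] */
}

int main(void)
{
	struct ctx_ring_model ring = { .sequence = 0 };

	atomic_init(&ring.last_queued, 40);	   /* 40 jobs queued so far */
	printf("%d\n", check_user_seq(&ring, 39)); /* 0: still in the window */
	printf("%d\n", check_user_seq(&ring, 41)); /* -1: not issued yet */
	printf("%d\n", check_user_seq(&ring, 10)); /* 1: slot recycled */
	return 0;
}

The sketch deliberately omits the ring_lock, fence lifetimes and the scheduler entity itself; it only illustrates why the scheduler path reads an atomic counter where the legacy path reads the plain per-ring sequence.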