
drm/amdgpu: use scheduler user seq instead of previous user seq

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
Chunming Zhou 2015-07-21 15:13:53 +08:00 committed by Alex Deucher
parent 049fc527b4
commit b43a9a7e87
1 changed file with 20 additions and 6 deletions
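In short: with the GPU scheduler enabled, the per-ring fence bookkeeping can no longer take its user sequence number from cring->sequence, because the scheduler entity now hands those out; the sequence is instead read from c_entity.last_queued_v_seq, and cring->sequence is only advanced on the non-scheduler path. A minimal, self-contained C sketch of that selection pattern follows (the structures, the pick_user_seq() helper, and the constant's value are illustrative stand-ins, not the driver's real definitions; only amdgpu_enable_scheduler, last_queued_v_seq, and AMDGPU_CTX_MAX_CS_PENDING are taken from the diff below):

#include <stdint.h>
#include <stdio.h>

#define AMDGPU_CTX_MAX_CS_PENDING	16	/* illustrative value; size of the pending-fence ring */

/* Simplified stand-ins for the kernel structures touched by this patch. */
struct ctx_entity {
	uint64_t last_queued_v_seq;	/* last user seq queued to the scheduler entity */
};

struct ctx_ring {
	uint64_t sequence;		/* legacy user seq counter, used without the scheduler */
	struct ctx_entity c_entity;
};

static int amdgpu_enable_scheduler = 1;	/* a module parameter in the real driver */

/* Choose the user sequence number the way amdgpu_ctx_add_fence() now does:
 * from the scheduler entity when the scheduler is on, else from the ring. */
static uint64_t pick_user_seq(const struct ctx_ring *cring)
{
	if (amdgpu_enable_scheduler)
		return cring->c_entity.last_queued_v_seq;
	return cring->sequence;
}

int main(void)
{
	struct ctx_ring cring = {
		.sequence = 5,
		.c_entity = { .last_queued_v_seq = 42 },
	};
	uint64_t seq = pick_user_seq(&cring);
	unsigned int idx = seq % AMDGPU_CTX_MAX_CS_PENDING;	/* slot in the pending-fence array */

	printf("seq=%llu -> fence slot %u\n", (unsigned long long)seq, idx);
	return 0;
}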


@@ -225,10 +225,16 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 			      struct fence *fence)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-	uint64_t seq = cring->sequence;
-	unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
-	struct fence *other = cring->fences[idx];
+	uint64_t seq = 0;
+	unsigned idx = 0;
+	struct fence *other = NULL;
+	if (amdgpu_enable_scheduler)
+		seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
+	else
+		seq = cring->sequence;
+	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	other = cring->fences[idx];
 
 	if (other) {
 		signed long r;
 		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
@@ -240,7 +246,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	spin_lock(&ctx->ring_lock);
 	cring->fences[idx] = fence;
-	cring->sequence++;
+	if (!amdgpu_enable_scheduler)
+		cring->sequence++;
 	spin_unlock(&ctx->ring_lock);
 	fence_put(other);
@@ -253,14 +260,21 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	struct fence *fence;
+	uint64_t queued_seq;
 
 	spin_lock(&ctx->ring_lock);
-	if (seq >= cring->sequence) {
+	if (amdgpu_enable_scheduler)
+		queued_seq = atomic64_read(&cring->c_entity.last_queued_v_seq) + 1;
+	else
+		queued_seq = cring->sequence;
+
+	if (seq >= queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
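For the lookup side (amdgpu_ctx_get_fence() in the last hunk), queued_seq is the next sequence number that would be handed out, so a requested seq must lie below it and be no more than AMDGPU_CTX_MAX_CS_PENDING behind it; anything older has already had its fence slot reused. A small sketch of that window check, reusing the illustrative constant from the sketch above (the -1/0/1 return codes are mine, standing in for the kernel's ERR_PTR(-EINVAL), NULL, and fence-pointer results):

#include <stdint.h>

#define AMDGPU_CTX_MAX_CS_PENDING	16	/* illustrative, as in the sketch above */

/* Classify a requested user seq against the current queued_seq, the way
 * amdgpu_ctx_get_fence() does under ctx->ring_lock:
 *   -1: not queued yet       (kernel returns ERR_PTR(-EINVAL))
 *    0: aged out of the ring (kernel returns NULL)
 *    1: a fence slot for it may still exist */
int user_seq_status(uint64_t seq, uint64_t queued_seq)
{
	if (seq >= queued_seq)
		return -1;
	if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq)
		return 0;
	return 1;
}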