drm/amdgpu: fix old fence check in amdgpu_fence_emit

We don't hold a reference to the old fence, so it can go away
any time we are waiting for it to signal.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 2019-03-29 19:30:23 +01:00 committed by Alex Deucher
parent 1afeb31443
commit 3d2aca8c86
1 changed file with 17 additions and 7 deletions

@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_fence *fence;
-	struct dma_fence *old, **ptr;
+	struct dma_fence __rcu **ptr;
 	uint32_t seq;
+	int r;
 
 	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
 			       seq, flags | AMDGPU_FENCE_FLAG_INT);
 
 	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+	if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+		struct dma_fence *old;
+
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(ptr);
+		rcu_read_unlock();
+
+		if (old) {
+			r = dma_fence_wait(old, false);
+			dma_fence_put(old);
+			if (r)
+				return r;
+		}
+	}
+
 	/* This function can't be called concurrently anyway, otherwise
 	 * emitting the fence would mess up the hardware ring buffer.
 	 */
-	old = rcu_dereference_protected(*ptr, 1);
-	if (old && !dma_fence_is_signaled(old)) {
-		DRM_INFO("rcu slot is busy\n");
-		dma_fence_wait(old, false);
-	}
-
 	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
 
 	*f = &fence->base;
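
For context, the pattern the patch switches to is the usual RCU-safe way to wait on a fence stored in an RCU-protected slot: take your own reference with dma_fence_get_rcu_safe() so the fence cannot be freed while we wait, wait on it, then drop the reference. Below is a minimal sketch of that pattern only; the helper name wait_for_old_fence() and its "slot" parameter are illustrative and not part of the amdgpu driver.

/*
 * Sketch of the take-a-reference-then-wait pattern used in the hunk above.
 * wait_for_old_fence() and "slot" are illustrative names, not driver code.
 */
#include <linux/dma-fence.h>
#include <linux/rcupdate.h>

static int wait_for_old_fence(struct dma_fence __rcu **slot)
{
	struct dma_fence *old;
	int r = 0;

	/* Grab our own reference so the fence cannot go away under us. */
	rcu_read_lock();
	old = dma_fence_get_rcu_safe(slot);
	rcu_read_unlock();

	if (old) {
		/* Non-interruptible wait, matching the call in the patch. */
		r = dma_fence_wait(old, false);
		/* Drop the reference taken above. */
		dma_fence_put(old);
	}
	return r;
}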