drm/nouveau/gem: attach fences to VMAs to track GPU usage

An upcoming patch will use these to fix issues related to the deferred
unmapping of GEM objects.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2018-05-08 20:39:47 +10:00
parent 19ca10d82e
commit 0db912af8f
3 changed files with 16 additions and 1 deletion

drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -99,6 +99,7 @@ struct nouveau_gem_object_unmap {
 static void
 nouveau_gem_object_delete(struct nouveau_vma *vma)
 {
+	nouveau_fence_unref(&vma->fence);
 	nouveau_vma_del(&vma);
 }
@@ -344,9 +345,20 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence,
 		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
 		b = &pbbo[nvbo->pbbo_index];
 
-		if (likely(fence))
+		if (likely(fence)) {
+			struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+			struct nouveau_vma *vma;
+
 			nouveau_bo_fence(nvbo, fence, !!b->write_domains);
 
+			if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
+				vma = (void *)(unsigned long)b->user_priv;
+				nouveau_fence_unref(&vma->fence);
+				dma_fence_get(&fence->base);
+				vma->fence = fence;
+			}
+		}
+
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
 			nvbo->validate_mapped = false;

drivers/gpu/drm/nouveau/nouveau_vmm.c

@@ -92,6 +92,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->refs = 1;
 	vma->addr = ~0ULL;
 	vma->mem = NULL;
+	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
 	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&

drivers/gpu/drm/nouveau/nouveau_vmm.h

@@ -11,6 +11,8 @@ struct nouveau_vma {
 	u64 addr;
 	struct nouveau_mem *mem;
+
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_vma *nouveau_vma_find(struct nouveau_bo *, struct nouveau_vmm *);
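
This commit only attaches vma->fence during submission; nothing here consumes it yet. As a rough, hypothetical sketch of how the deferred-unmap fix mentioned in the commit message could use the field, the helper below waits for the attached fence before tearing the VMA down. The name nouveau_gem_object_delete_sync is invented for illustration (it is not part of this or the follow-up patch), and the function is assumed to sit in nouveau_gem.c next to nouveau_gem_object_delete() so it picks up the same headers; it only calls APIs that already exist (dma_fence_wait(), nouveau_fence_unref(), nouveau_vma_del()).

/* Hypothetical sketch only: wait for the last fence attached to the VMA
 * during submission, so the GPU is known to be idle on the mapping before
 * it is unmapped, then drop the reference taken in validate_fini_no_ticket().
 */
static void
nouveau_gem_object_delete_sync(struct nouveau_vma *vma)
{
	if (vma->fence)
		dma_fence_wait(&vma->fence->base, false);
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}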