drm: Add helpers for setting up an array of dma_fence dependencies.

I needed to add implicit dependency support for v3d, Rob Herring has
been working on it for panfrost, and I had recently looked at the lima
implementation, so I think this is a good intersection of what we all
want and should simplify our scheduler implementations.

v2: Rebase on xa_limit_32b API change, and tiny checkpatch cleanups on
    the way in (unsigned int vs unsigned, extra return before
    EXPORT_SYMBOL_GPL)

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190401222635.25013-6-eric@anholt.net
Reviewed-and-tested-by: Qiang Yu <yuq825@gmail.com> (v1)
Eric Anholt 2019-04-01 15:26:33 -07:00
parent c8f005684c
commit 5d5a179d3e
2 changed files with 98 additions and 0 deletions

drivers/gpu/drm/drm_gem.c

@@ -1367,3 +1367,96 @@ drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);
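
As a usage illustration (not part of this patch): a driver's submit path
could keep the dependency array in its job struct and feed an explicit
in-fence through this helper. The struct my_job and my_job_add_in_fence()
names below are hypothetical. Note that, as the code above shows, the
helper consumes the caller's fence reference whether it stores the fence,
deduplicates it, or fails to add it.

#include <linux/dma-fence.h>
#include <linux/xarray.h>

#include <drm/drm_gem.h>

struct my_job {
	struct xarray deps;		/* dma_fence * entries to wait on */
};

static int my_job_add_in_fence(struct my_job *job, struct dma_fence *in_fence)
{
	/* drm_gem_fence_array_add() allocates IDs with xa_alloc(), so the
	 * xarray must be initialized with XA_FLAGS_ALLOC.
	 */
	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	/* Ownership of in_fence passes to the helper, even on error. */
	return drm_gem_fence_array_add(&job->deps, in_fence);
}
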
/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			reservation_object_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = reservation_object_get_fences_rcu(obj->resv, NULL,
						&fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
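
And a sketch of the call pattern described in the comment above, reusing the
hypothetical struct my_job (and headers) from the previous sketch: lock every
BO's reservation, gather the implicit fences into the job's dependency array,
and only then, while still locked, publish the job's own fence before
unlocking.

static int my_job_gather_implicit_deps(struct my_job *job,
				       struct drm_gem_object **bos,
				       int bo_count, bool write)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	ret = drm_gem_lock_reservations(bos, bo_count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(&job->deps, bos[i],
						       write);
		if (ret)
			goto out_unlock;
	}

	/* ... while the reservations are still held, the driver would add
	 * its own job-done fence to each BO's reservation object here ...
	 */

out_unlock:
	drm_gem_unlock_reservations(bos, bo_count, &ctx);
	return ret;
}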

include/drm/drm_gem.h

@@ -390,6 +390,11 @@ int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			      struct ww_acquire_ctx *acquire_ctx);
void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
				 struct ww_acquire_ctx *acquire_ctx);
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence);
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset);
int drm_gem_dumb_destroy(struct drm_file *file,