drm/i915: extract object active state flushing code

Both busy_ioctl and the new wait_ioctl need to do the same dance (or at
least should). Some slight changes:
- busy_ioctl now unconditionally checks for olr. Before, emitting a
  required flush would have prevented the olr check and hence required a
  second call to the busy ioctl to actually emit the request; the
  userspace sketch below illustrates the polling pattern.
- the timeout wait now also retires requests. Not really required for
  ABI reasons, but makes a notch more sense imo.
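
To illustrate the polling pattern in question, here is a minimal
userspace sketch comparing the two ioctls. The structs and ioctl
numbers are the i915 UAPI from <drm/i915_drm.h>; the helper names are
made up for illustration, and the ETIME-on-timeout convention assumes
the wait ioctl semantics proposed in this series:

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Poll via the busy ioctl: nonzero while the bo is still active. */
static int gem_bo_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	return busy.busy != 0;
}

/* Poll via the wait ioctl: timeout_ns == 0 turns the wait into a poll,
 * and ETIME signals that the bo is still busy. Both helpers should
 * agree once the olr is emitted unconditionally. */
static int gem_bo_busy_via_wait(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 0,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
		return 0;

	return errno == ETIME;
}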

I've tested this by pimping the i-g-t test some more and also checking
the polling behaviour of the wait_rendering_timeout ioctl versus what
busy_ioctl returns.

v2: Too many people complained about unplug, new color is
flush_active.

v3: Kill the comment about the unplug moniker.

v4: s/un-active/inactive/

Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Daniel Vetter 2012-06-01 15:21:23 +02:00
parent 493a708179
commit 30dfebf34b
1 changed file with 29 additions and 32 deletions

@@ -2029,6 +2029,31 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
+/**
+ * Ensures that an object will eventually get non-busy by flushing any required
+ * write domains, emitting any outstanding lazy request and retiring any
+ * completed requests.
+ */
+static int
+i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if (obj->active) {
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
+		if (ret)
+			return ret;
+
+		ret = i915_gem_check_olr(obj->ring,
+					 obj->last_rendering_seqno);
+		if (ret)
+			return ret;
+		i915_gem_retire_requests_ring(obj->ring);
+	}
+
+	return 0;
+}
+
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  * @DRM_IOCTL_ARGS: standard ioctl arguments
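
For reference: the i915_gem_check_olr call the new helper pulls in
amounts to emitting the ring's outstanding lazy request when the
object's last seqno still sits in it, so a pure poll can make forward
progress. A rough sketch under that assumption, using kernel-internal
names from this era rather than the verbatim function:

/* Sketch only: if the seqno we would wait on is still just a lazy
 * request, emit it now; otherwise it would never retire on its own. */
static int check_olr_sketch(struct intel_ring_buffer *ring, u32 seqno)
{
	if (seqno == ring->outstanding_lazy_request)
		return i915_add_request(ring, NULL, NULL);

	return 0;
}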
@@ -2073,11 +2098,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		return -ENOENT;
 	}
 
-	/* Need to make sure the object is flushed first. This non-obvious
-	 * flush is required to enforce that (active && !olr) == no wait
-	 * necessary.
-	 */
-	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	/* Need to make sure the object gets inactive eventually. */
+	ret = i915_gem_object_flush_active(obj);
 	if (ret)
 		goto out;
 
@@ -2089,10 +2111,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	if (seqno == 0)
 		goto out;
 
-	ret = i915_gem_check_olr(ring, seqno);
-	if (ret)
-		goto out;
-
 	/* Do this after OLR check to make sure we make forward progress polling
 	 * on this IOCTL with a 0 timeout (like busy ioctl)
 	 */
@@ -3330,30 +3348,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * become non-busy without any further actions, therefore emit any
 	 * necessary flushes here.
 	 */
+	ret = i915_gem_object_flush_active(obj);
+
 	args->busy = obj->active;
-	if (args->busy) {
-		/* Unconditionally flush objects, even when the gpu still uses this
-		 * object. Userspace calling this function indicates that it wants to
-		 * use this buffer rather sooner than later, so issuing the required
-		 * flush earlier is beneficial.
-		 */
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->ring,
-						  0, obj->base.write_domain);
-		} else {
-			ret = i915_gem_check_olr(obj->ring,
-						 obj->last_rendering_seqno);
-		}
-
-		/* Update the active list for the hardware's current position.
-		 * Otherwise this only updates on a delayed timer or when irqs
-		 * are actually unmasked, and our working set ends up being
-		 * larger than required.
-		 */
-		i915_gem_retire_requests_ring(obj->ring);
-
-		args->busy = obj->active;
-	}
 
 	drm_gem_object_unreference(&obj->base);
 unlock: