drm/i915: Add flag to i915_add_request() to skip the cache flush
In order to explicitly track all GPU work (and to completely remove the
outstanding lazy request), it is necessary to add extra i915_add_request()
calls in various places. Some of these do not need the implicit cache flush
that is done as part of the standard batch buffer submission process. This
patch adds a flag to __i915_add_request() to specify whether the flush is
required or not.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit 5b4a60c276
parent 8a8edb5917
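As a sketch of the resulting API (the call sites below are illustrative
examples, not hunks from this patch), request emission that follows real
rendering keeps the implicit flush via i915_add_request(), while the new
i915_add_request_no_flush() variant is intended for the extra tracking
requests that have nothing to flush:

	/* Illustrative only -- both macros wrap __i915_add_request()
	 * with the new flush_caches argument (see the i915_drv.h hunk
	 * below).
	 */

	/* A batch buffer was just submitted, so the render caches may
	 * be dirty: keep the implicit flush.
	 */
	i915_add_request(ring);

	/* A request added purely to track GPU work, with no new
	 * rendering since the last flush: skip the redundant flush.
	 */
	i915_add_request_no_flush(ring);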
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2890,9 +2890,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct intel_engine_cs *ring,
 			struct drm_file *file,
-			struct drm_i915_gem_object *batch_obj);
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
 #define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+	__i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+	__i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2470,7 +2470,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  */
 void __i915_add_request(struct intel_engine_cs *ring,
 			struct drm_file *file,
-			struct drm_i915_gem_object *obj)
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
@@ -2502,12 +2503,14 @@ void __i915_add_request(struct intel_engine_cs *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists)
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-	else
-		ret = intel_ring_flush_all_caches(ring);
-	/* Not allowed to fail! */
-	WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+		else
+			ret = intel_ring_flush_all_caches(ring);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	}
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->ring, params->file, params->batch_obj);
+	__i915_add_request(params->ring, params->file, params->batch_obj, true);
 }
 
 static int
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	__i915_add_request(ring, NULL, so.obj);
+	__i915_add_request(ring, NULL, so.obj, true);
 	/* __i915_add_request moves object to inactive if it fails */
 out:
 	i915_gem_render_state_fini(&so);
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1599,7 +1599,7 @@ static int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	__i915_add_request(ring, file, so.obj);
+	__i915_add_request(ring, file, so.obj, true);
 	/* intel_logical_ring_add_request moves object to inactive if it
 	 * fails */
 out:
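For reference, a heavily condensed sketch of __i915_add_request() after this
patch (not the actual kernel source; ringbuf and request are set up earlier
in the real function, which also records the request position and emits the
breadcrumb):

	/* Condensed sketch of the post-patch flow -- illustrative only. */
	void __i915_add_request(struct intel_engine_cs *ring,
				struct drm_file *file,
				struct drm_i915_gem_object *obj,
				bool flush_caches)
	{
		int ret;

		if (flush_caches) {
			/* When the flush does happen, it _must_ happen
			 * before the next request, so failure is not
			 * allowed here.
			 */
			if (i915.enable_execlists)
				ret = logical_ring_flush_all_caches(ringbuf,
								    request->ctx);
			else
				ret = intel_ring_flush_all_caches(ring);
			WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
		}

		/* ... record request start, add breadcrumb, queue request ... */
	}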