
drm/i915: Unify request submission

Move request submission from emit_request into its own common vfunc
from i915_add_request().

v2: Convert I915_DISPATCH_flags to BIT(x) whilst passing
v3: Rename a few functions to match.
v4: Reenable execlists submission after disabling guc.
v5: Be aware that everyone calls i915_guc_submission_disable()!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-23-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-14-git-send-email-chris@chris-wilson.co.uk
Chris Wilson 2016-08-02 22:50:31 +01:00
parent 8f9420184a
commit ddd66c5154
7 changed files with 54 additions and 48 deletions
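The patch establishes one two-step contract for every submission backend: engine->emit_request() writes the request's breadcrumb commands into the ring, and engine->submit_request() hands the finished request to the hardware, with __i915_add_request() driving both steps. Below is a minimal standalone model of that contract; the struct layouts and helper names are illustrative stand-ins, not the kernel's definitions.

#include <stdio.h>

struct request;

struct engine {
	const char *name;
	int  (*emit_request)(struct request *rq);	/* write breadcrumb */
	void (*submit_request)(struct request *rq);	/* hand off to HW */
};

struct request {
	struct engine *engine;
	unsigned int seqno;
};

static int emit_breadcrumb(struct request *rq)
{
	printf("%s: emit seqno %u\n", rq->engine->name, rq->seqno);
	return 0;
}

static void queue_to_hw(struct request *rq)
{
	printf("%s: submit seqno %u\n", rq->engine->name, rq->seqno);
}

/* Mirrors the tail of the reworked __i915_add_request(): emit the
 * breadcrumb through one vfunc, then submit through the other. */
static void add_request(struct request *rq)
{
	int ret;

	ret = rq->engine->emit_request(rq);
	if (ret)
		fprintf(stderr, "(%s)->emit_request failed: %d!\n",
			rq->engine->name, ret);
	rq->engine->submit_request(rq);
}

int main(void)
{
	struct engine rcs = { "rcs", emit_breadcrumb, queue_to_hw };
	struct request rq = { &rcs, 1 };

	add_request(&rq);
	return 0;
}

Because the backend is selected by repointing submit_request per engine, the request path no longer branches on i915.enable_execlists or i915.enable_guc_submission.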

drivers/gpu/drm/i915/i915_gem_request.c

@@ -466,12 +466,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = ring->tail;
 
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else
-		ret = engine->add_request(request);
 	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
+	ret = engine->emit_request(request);
+	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
 
 	/* Sanity check that the reserved size was large enough. */
 	ret = ring->tail - request_start;
@@ -483,6 +480,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		  reserved_tail, ret);
 
 	i915_gem_mark_busy(engine);
+	engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)

drivers/gpu/drm/i915/i915_guc_submission.c

@@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouln't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
@@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
-
-	return b_ret;
 }
 
 /*
@@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
+	struct intel_engine_cs *engine;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 
 	host2guc_sample_forcewake(guc, client);
 	guc_init_doorbell_hw(guc);
 
+	/* Take over from manual control of ELSP (execlists) */
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = i915_guc_submit;
+
 	return 0;
 }
@@ -1013,8 +1016,14 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
+	if (!guc->execbuf_client)
+		return;
+
 	guc_client_free(dev_priv, guc->execbuf_client);
 	guc->execbuf_client = NULL;
+
+	/* Revert back to manual ELSP submission */
+	intel_execlists_enable_submission(dev_priv);
 }
 
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
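The two hunks above form a matched pair: enabling GuC submission repoints every engine's submit_request at i915_guc_submit(), and disabling reverts to execlists via intel_execlists_enable_submission(). A rough standalone model of that vfunc swap follows, including the early return that makes the disable path safe when GuC was never enabled (the v5 note in the changelog); all types and names below are simplified stand-ins, not the kernel's.

#include <stdio.h>

struct request;
typedef void (*submit_fn)(struct request *rq);

struct engine { submit_fn submit_request; };
struct guc { void *execbuf_client; };

static void execlists_queue(struct request *rq) { (void)rq; /* ELSP path */ }
static void guc_submit(struct request *rq)      { (void)rq; /* doorbell path */ }

static int dummy_client; /* stand-in for the allocated execbuf client */

static void guc_submission_enable(struct guc *guc, struct engine *e, int n)
{
	int i;

	guc->execbuf_client = &dummy_client;
	for (i = 0; i < n; i++)		/* take over from manual ELSP */
		e[i].submit_request = guc_submit;
}

static void guc_submission_disable(struct guc *guc, struct engine *e, int n)
{
	int i;

	if (!guc->execbuf_client)	/* v5: callers may never have enabled */
		return;

	guc->execbuf_client = NULL;
	for (i = 0; i < n; i++)		/* revert to manual ELSP submission */
		e[i].submit_request = execlists_queue;
}

int main(void)
{
	struct engine engines[2] = { { execlists_queue }, { execlists_queue } };
	struct guc guc = { NULL };

	guc_submission_disable(&guc, engines, 2);	/* harmless no-op */
	guc_submission_enable(&guc, engines, 2);
	guc_submission_disable(&guc, engines, 2);
	printf("back on execlists: %d\n",
	       engines[0].submit_request == execlists_queue);
	return 0;
}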

drivers/gpu/drm/i915/intel_guc.h

@@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);

drivers/gpu/drm/i915/intel_lrc.c

@@ -738,7 +738,7 @@ err_unpin:
 }
 
 /*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
  * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -747,7 +747,7 @@ err_unpin:
 * point, the tail *inside* the context is updated and the ELSP written to.
 */
 static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -773,12 +773,6 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
 
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
 	return 0;
 }
@@ -1768,7 +1762,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, request->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
@@ -1799,7 +1793,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1900,13 +1894,23 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	engine->i915 = NULL;
 }
 
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = execlists_context_queue;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	engine->submit_request = execlists_context_queue;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;

drivers/gpu/drm/i915/intel_lrc.h

@@ -95,6 +95,8 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,

drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -1428,15 +1428,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
 *
 * @request - request to write to the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1457,13 +1456,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
 
 	return 0;
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1487,9 +1484,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
+
 	return 0;
 }
@@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return 0;
 }
 
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1709,7 +1705,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	req->engine->submit_request(req);
 
 	return 0;
 }
@@ -2814,11 +2809,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 			       struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->submit_request = i9xx_submit_request;
 
-	engine->add_request = i9xx_add_request;
+	engine->emit_request = i9xx_emit_request;
 	if (INTEL_GEN(dev_priv) >= 6)
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
+	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2847,7 +2842,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
+		engine->emit_request = gen8_render_emit_request;
 		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
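On the legacy ring path the same split now applies: the *_emit_request() functions end by recording req->tail, while i9xx_submit_request (already installed as engine->submit_request before this patch) remains the only place the hardware tail moves. A compact standalone model of that hand-off, with a plain variable standing in for the RING_TAIL register write:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the RING_TAIL register; the real code writes through
 * the engine's mmio base. */
static uint32_t ring_tail_reg;

struct ring { uint32_t tail; };
struct request {
	struct ring *ring;
	uint32_t tail;		/* where this request's commands end */
};

/* Model of i9xx_emit_request(): after the breadcrumb commands are
 * written, only snapshot the software tail; hardware is untouched. */
static int emit_request_model(struct request *req)
{
	req->ring->tail += 16;		/* pretend 4 dwords were emitted */
	req->tail = req->ring->tail;
	return 0;
}

/* Model of i9xx_submit_request(): the hand-off to hardware, now the
 * sole place the tail register is written. */
static void submit_request_model(struct request *req)
{
	ring_tail_reg = req->tail;
}

int main(void)
{
	struct ring ring = { 0 };
	struct request req = { &ring, 0 };

	emit_request_model(&req);
	submit_request_model(&req);
	printf("RING_TAIL <- %u\n", ring_tail_reg);
	return 0;
}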

drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -204,7 +204,19 @@ struct intel_engine_cs {
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS     BIT(2)
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -282,19 +294,6 @@
 	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 mode);
-#define EMIT_INVALIDATE	BIT(0)
-#define EMIT_FLUSH	BIT(1)
-#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
	 * List of objects currently involved in rendering from the