
drm/i915: Hold irq-off for the entire fake lock period

Sadly lockdep records when the irqs are re-enabled and then marks up the
fake lock as being irq-unsafe. Our hand is forced and so we must mark up
the entire fake lock critical section as irq-off.

Hopefully this is the last tweak required!

v2: Not quite, we need to mark the timeline spinlock as irqsafe. That
was a genuine bug being hidden by the earlier lockdep splat.
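
For reference, a condensed sketch of the v2 annotation helpers (taken from the
intel_engine_pm.c hunk below; the !CONFIG_LOCKDEP stubs are omitted): the saved
irq flags are returned to the caller so that interrupts stay disabled across
the entire fake critical section.

static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
{
        unsigned long flags;

        /* irqs go off before the fake acquire... */
        local_irq_save(flags);
        mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);

        return flags;
}

static inline void __timeline_mark_unlock(struct intel_context *ce,
                                          unsigned long flags)
{
        /* ...and only back on after the matching release. */
        mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
        local_irq_restore(flags);
}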

Fixes: d67739268c ("drm/i915/gt: Mark up the nested engine-pm timeline lock as irqsafe")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823132700.25286-2-chris@chris-wilson.co.uk
(cherry picked from commit 6dcb85a0ad)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Chris Wilson 2019-08-23 14:26:46 +01:00 committed by Rodrigo Vivi
parent 0ac072cced
commit ff36c5c4fd
5 changed files with 42 additions and 29 deletions
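
Because the fake lock now keeps irqs disabled while the barrier request is
created and queued, gt->timelines.lock is acquired with interrupts off and must
be irq-safe everywhere. The remaining four files apply the same mechanical
conversion to every user of that lock; condensed from the intel_timeline.c
hunk below:

        unsigned long flags;

        spin_lock_irqsave(&timelines->lock, flags);     /* was spin_lock() */
        list_add(&tl->link, &timelines->active_list);
        spin_unlock_irqrestore(&timelines->lock, flags); /* was spin_unlock() */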

drivers/gpu/drm/i915/gt/intel_engine_pm.c

@@ -39,27 +39,32 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 #if IS_ENABLED(CONFIG_LOCKDEP)
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
         unsigned long flags;
 
         local_irq_save(flags);
         mutex_acquire(&ce->timeline->mutex.dep_map, 2, 0, _THIS_IP_);
-        local_irq_restore(flags);
+
+        return flags;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                          unsigned long flags)
 {
         mutex_release(&ce->timeline->mutex.dep_map, 0, _THIS_IP_);
+        local_irq_restore(flags);
 }
 
 #else
 
-static inline void __timeline_mark_lock(struct intel_context *ce)
+static inline unsigned long __timeline_mark_lock(struct intel_context *ce)
 {
+        return 0;
 }
 
-static inline void __timeline_mark_unlock(struct intel_context *ce)
+static inline void __timeline_mark_unlock(struct intel_context *ce,
+                                          unsigned long flags)
 {
 }
 
@@ -68,6 +73,8 @@ static inline void __timeline_mark_unlock(struct intel_context *ce)
 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 {
         struct i915_request *rq;
+        unsigned long flags;
+        bool result = true;
 
         /* Already inside the kernel context, safe to power down. */
         if (engine->wakeref_serial == engine->serial)
@@ -89,12 +96,12 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
          * retiring the last request, thus all rings should be empty and
          * all timelines idle.
          */
-        __timeline_mark_lock(engine->kernel_context);
+        flags = __timeline_mark_lock(engine->kernel_context);
 
         rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
         if (IS_ERR(rq))
                 /* Context switch failed, hope for the best! Maybe reset? */
-                return true;
+                goto out_unlock;
 
         intel_timeline_enter(rq->timeline);
 
@@ -110,9 +117,10 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 
         __intel_wakeref_defer_park(&engine->wakeref);
         __i915_request_queue(rq, NULL);
-
-        __timeline_mark_unlock(engine->kernel_context);
-        return false;
+        result = false;
+out_unlock:
+        __timeline_mark_unlock(engine->kernel_context, flags);
+        return result;
 }
 
 static int __engine_park(struct intel_wakeref *wf)

drivers/gpu/drm/i915/gt/intel_reset.c

@@ -792,6 +792,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 {
         struct intel_gt_timelines *timelines = &gt->timelines;
         struct intel_timeline *tl;
+        unsigned long flags;
 
         if (!test_bit(I915_WEDGED, &gt->reset.flags))
                 return true;
@@ -811,7 +812,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
          *
          * No more can be submitted until we reset the wedged bit.
          */
-        spin_lock(&timelines->lock);
+        spin_lock_irqsave(&timelines->lock, flags);
         list_for_each_entry(tl, &timelines->active_list, link) {
                 struct i915_request *rq;
 
@@ -819,7 +820,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                 if (!rq)
                         continue;
 
-                spin_unlock(&timelines->lock);
+                spin_unlock_irqrestore(&timelines->lock, flags);
 
                 /*
                  * All internal dependencies (i915_requests) will have
@@ -832,10 +833,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
                 i915_request_put(rq);
 
                 /* Restart iteration after droping lock */
-                spin_lock(&timelines->lock);
+                spin_lock_irqsave(&timelines->lock, flags);
                 tl = list_entry(&timelines->active_list, typeof(*tl), link);
         }
-        spin_unlock(&timelines->lock);
+        spin_unlock_irqrestore(&timelines->lock, flags);
 
         intel_gt_sanitize(gt, false);
 

drivers/gpu/drm/i915/gt/intel_timeline.c

@@ -337,6 +337,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
 void intel_timeline_enter(struct intel_timeline *tl)
 {
         struct intel_gt_timelines *timelines = &tl->gt->timelines;
+        unsigned long flags;
 
         lockdep_assert_held(&tl->mutex);
 
@@ -345,14 +346,15 @@ void intel_timeline_enter(struct intel_timeline *tl)
                 return;
         GEM_BUG_ON(!tl->active_count); /* overflow? */
 
-        spin_lock(&timelines->lock);
+        spin_lock_irqsave(&timelines->lock, flags);
         list_add(&tl->link, &timelines->active_list);
-        spin_unlock(&timelines->lock);
+        spin_unlock_irqrestore(&timelines->lock, flags);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
 {
         struct intel_gt_timelines *timelines = &tl->gt->timelines;
+        unsigned long flags;
 
         lockdep_assert_held(&tl->mutex);
 
@@ -360,9 +362,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
         if (--tl->active_count)
                 return;
 
-        spin_lock(&timelines->lock);
+        spin_lock_irqsave(&timelines->lock, flags);
         list_del(&tl->link);
-        spin_unlock(&timelines->lock);
+        spin_unlock_irqrestore(&timelines->lock, flags);
 
         /*
          * Since this timeline is idle, all bariers upon which we were waiting

drivers/gpu/drm/i915/i915_gem.c

@@ -889,12 +889,13 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915)
 
 static long
 wait_for_timelines(struct drm_i915_private *i915,
-                   unsigned int flags, long timeout)
+                   unsigned int wait, long timeout)
 {
         struct intel_gt_timelines *timelines = &i915->gt.timelines;
         struct intel_timeline *tl;
+        unsigned long flags;
 
-        spin_lock(&timelines->lock);
+        spin_lock_irqsave(&timelines->lock, flags);
         list_for_each_entry(tl, &timelines->active_list, link) {
                 struct i915_request *rq;
 
@@ -902,7 +903,7 @@ wait_for_timelines(struct drm_i915_private *i915,
                 if (!rq)
                         continue;
 
-                spin_unlock(&timelines->lock);
+                spin_unlock_irqrestore(&timelines->lock, flags);
 
                 /*
                  * "Race-to-idle".
@@ -913,19 +914,19 @@ wait_for_timelines(struct drm_i915_private *i915,
                  * want to complete as quickly as possible to avoid prolonged
                  * stalls, so allow the gpu to boost to maximum clocks.
                  */
-                if (flags & I915_WAIT_FOR_IDLE_BOOST)
+                if (wait & I915_WAIT_FOR_IDLE_BOOST)
                         gen6_rps_boost(rq);
 
-                timeout = i915_request_wait(rq, flags, timeout);
+                timeout = i915_request_wait(rq, wait, timeout);
                 i915_request_put(rq);
                 if (timeout < 0)
                         return timeout;
 
                 /* restart after reacquiring the lock */
-                spin_lock(&timelines->lock);
+                spin_lock_irqsave(&timelines->lock, flags);
                 tl = list_entry(&timelines->active_list, typeof(*tl), link);
         }
-        spin_unlock(&timelines->lock);
+        spin_unlock_irqrestore(&timelines->lock, flags);
 
         return timeout;
 }

drivers/gpu/drm/i915/i915_request.c

@@ -1465,9 +1465,10 @@ bool i915_retire_requests(struct drm_i915_private *i915)
 {
         struct intel_gt_timelines *timelines = &i915->gt.timelines;
         struct intel_timeline *tl, *tn;
+        unsigned long flags;
         LIST_HEAD(free);
 
-        spin_lock(&timelines->lock);
+        spin_lock_irqsave(&timelines->lock, flags);
         list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
                 if (!mutex_trylock(&tl->mutex))
                         continue;
@@ -1475,11 +1476,11 @@ bool i915_retire_requests(struct drm_i915_private *i915)
                 intel_timeline_get(tl);
                 GEM_BUG_ON(!tl->active_count);
                 tl->active_count++; /* pin the list element */
-                spin_unlock(&timelines->lock);
+                spin_unlock_irqrestore(&timelines->lock, flags);
 
                 retire_requests(tl);
 
-                spin_lock(&timelines->lock);
+                spin_lock_irqsave(&timelines->lock, flags);
 
                 /* Resume iteration after dropping lock */
                 list_safe_reset_next(tl, tn, link);
@@ -1494,7 +1495,7 @@ bool i915_retire_requests(struct drm_i915_private *i915)
                         list_add(&tl->link, &free);
                 }
         }
-        spin_unlock(&timelines->lock);
+        spin_unlock_irqrestore(&timelines->lock, flags);
 
         list_for_each_entry_safe(tl, tn, &free, link)
                 __intel_timeline_free(&tl->kref);