
drm/i915: Pin relocations for the duration of constructing the execbuffer

As the execbuffer dispatch grows ever more complex and involves multiple
stages of moving objects into the aperture, we need to take greater care
that we do not evict our execbuffer objects prior to dispatch. This is
relatively simple, as we can just keep the objects pinned not only for
the relocation but until the dispatch is complete.

One such example is the context switch, which can cause an eviction or
hit the shrinker in order to fit its own object into the aperture.
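
To illustrate the invariant the patch enforces, here is a minimal
userspace model (not driver code; fake_obj, can_evict and the pin_count
field are invented for the example): an object with a non-zero pin
count must be skipped by eviction, so holding the pin across the whole
execbuffer construction keeps the object resident until dispatch.

/* Toy model of the pin-until-dispatch rule; all names here are
 * illustrative stand-ins, not i915 driver API. */
#include <stdbool.h>
#include <stdio.h>

struct fake_obj {
	const char *name;
	int pin_count;		/* >0: object must stay bound in the aperture */
};

/* Eviction and the shrinker may only reclaim unpinned objects. */
static bool can_evict(const struct fake_obj *o)
{
	return o->pin_count == 0;
}

int main(void)
{
	struct fake_obj batch = { "batch", 0 };

	batch.pin_count++;	/* reserve: bind and pin */
	/* relocations, context switch, shrinker pressure happen here;
	 * "batch" cannot be evicted while the pin is held */
	printf("evictable mid-construction: %d\n", can_evict(&batch));
	batch.pin_count--;	/* unreserve: only after dispatch */
	printf("evictable after dispatch: %d\n", can_evict(&batch));
	return 0;
}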

Link: http://lists.freedesktop.org/archives/intel-gfx/2013-November/036166.html
Reported-by: "Siluvery, Arun" <arun.siluvery@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: stable@vger.kernel.org
Acked-by: Ben Widawsky <ben@bwidawsk.net>
Tested-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: Add the additional explanations from Chris to the commit
message.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Chris Wilson 2013-11-26 11:23:15 +00:00 committed by Daniel Vetter
parent 7c063c7259
commit a415d35564
1 changed file with 32 additions and 28 deletions


@@ -33,6 +33,9 @@
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
 
+#define __EXEC_OBJECT_HAS_PIN (1<<31)
+#define __EXEC_OBJECT_HAS_FENCE (1<<30)
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -187,7 +190,28 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 	}
 }
 
-static void eb_destroy(struct eb_vmas *eb) {
+static void
+i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
+{
+	struct drm_i915_gem_exec_object2 *entry;
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (!drm_mm_node_allocated(&vma->node))
+		return;
+
+	entry = vma->exec_entry;
+
+	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
+		i915_gem_object_unpin_fence(obj);
+
+	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
+		i915_gem_object_unpin(obj);
+
+	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
+}
+
+static void eb_destroy(struct eb_vmas *eb)
+{
 	while (!list_empty(&eb->vmas)) {
 		struct i915_vma *vma;
@@ -195,6 +219,7 @@ static void eb_destroy(struct eb_vmas *eb) {
 			       struct i915_vma,
 			       exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
 	kfree(eb);
@@ -478,9 +503,6 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb,
 	return ret;
 }
 
-#define __EXEC_OBJECT_HAS_PIN (1<<31)
-#define __EXEC_OBJECT_HAS_FENCE (1<<30)
-
 static int
 need_reloc_mappable(struct i915_vma *vma)
 {
@@ -552,26 +574,6 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	return 0;
 }
 
-static void
-i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
-{
-	struct drm_i915_gem_exec_object2 *entry;
-	struct drm_i915_gem_object *obj = vma->obj;
-
-	if (!drm_mm_node_allocated(&vma->node))
-		return;
-
-	entry = vma->exec_entry;
-
-	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
-		i915_gem_object_unpin_fence(obj);
-
-	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
-
-	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
-}
-
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct list_head *vmas,
@@ -670,13 +672,14 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			goto err;
 	}
 
-err:		/* Decrement pin count for bound objects */
-	list_for_each_entry(vma, vmas, exec_list)
-		i915_gem_execbuffer_unreserve_vma(vma);
-
+err:
 	if (ret != -ENOSPC || retry++)
 		return ret;
 
+	/* Decrement pin count for bound objects */
+	list_for_each_entry(vma, vmas, exec_list)
+		i915_gem_execbuffer_unreserve_vma(vma);
+
 	ret = i915_gem_evict_vm(vm, true);
 	if (ret)
 		return ret;
@@ -708,6 +711,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 	while (!list_empty(&eb->vmas)) {
 		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
 		list_del_init(&vma->exec_list);
+		i915_gem_execbuffer_unreserve_vma(vma);
 		drm_gem_object_unreference(&vma->obj->base);
 	}
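
The err: label rework in i915_gem_execbuffer_reserve above is easiest to
see flattened into a loop: success now returns with the pins still held,
and the pins are dropped only on the ENOSPC retry path, right before
eviction. A self-contained sketch of that flow (try_bind_all,
unreserve_all, evict_vm and have_space are invented stand-ins, not the
driver's API; the stub fails once with ENOSPC and succeeds after
"eviction"):

/* Stand-alone model of the new reserve/evict/retry control flow. */
#include <errno.h>
#include <stdio.h>

static int have_space;			/* 0: first bind attempt fails */

static int try_bind_all(void)		/* binds and pins on success */
{
	return have_space ? 0 : -ENOSPC;
}

static void unreserve_all(void)		/* drops the pins we took */
{
	printf("unpinning bound objects\n");
}

static int evict_vm(void)		/* pretend eviction frees room */
{
	have_space = 1;
	return 0;
}

static int reserve_with_retry(void)
{
	int ret, retry = 0;

	for (;;) {
		ret = try_bind_all();
		/* success, a hard error, or a second ENOSPC all return
		 * here; on success the pins are kept, so later stages of
		 * execbuffer construction cannot lose their objects */
		if (ret != -ENOSPC || retry++)
			return ret;

		/* only now drop the pins, so eviction can make room */
		unreserve_all();

		ret = evict_vm();
		if (ret)
			return ret;
	}
}

int main(void)
{
	printf("reserve_with_retry() = %d\n", reserve_with_retry());
	return 0;
}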