
Merge tag 'drm-intel-fixes-2017-06-27' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

Just a few minor fixes. Important one is the execbuf async fix (aka
ANDROID_native_sync). There was another patch for a display coherency
corner case on APL, but we've random-walked in that space too much,
and the cherry-pick looked really invasive.

* tag 'drm-intel-fixes-2017-06-27' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Disable EXEC_OBJECT_ASYNC when doing relocations
  drm/i915: Hold struct_mutex for per-file stats in debugfs/i915_gem_object
  drm/i915: Retire the VMA's fence tracker before unbinding
Dave Airlie 2017-06-28 17:07:15 +10:00
commit 9ff1beb1d1
3 changed files with 24 additions and 4 deletions

drivers/gpu/drm/i915/i915_debugfs.c

@@ -292,6 +292,8 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
 
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
 	stats->count++;
 	stats->total += obj->base.size;
 	if (!obj->bind_count)
@@ -476,6 +478,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		struct drm_i915_gem_request *request;
 		struct task_struct *task;
 
+		mutex_lock(&dev->struct_mutex);
+
 		memset(&stats, 0, sizeof(stats));
 		stats.file_priv = file->driver_priv;
 		spin_lock(&file->table_lock);
@@ -487,7 +491,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		 * still alive (e.g. get_pid(current) => fork() => exit()).
 		 * Therefore, we need to protect this ->comm access using RCU.
 		 */
-		mutex_lock(&dev->struct_mutex);
 		request = list_first_entry_or_null(&file_priv->mm.request_list,
 						   struct drm_i915_gem_request,
 						   client_link);
@@ -497,6 +500,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
+		mutex_unlock(&dev->struct_mutex);
 	}
 	mutex_unlock(&dev->filelist_mutex);
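
For reference, a minimal sketch of the locking order these two hunks establish (illustrative only, with a made-up helper name; not the verbatim kernel code): dev->struct_mutex is held around the whole per-client stats walk, because per_file_stats() follows each object's VMA list, and the new lockdep_assert_held() documents and enforces that requirement.

	/* Illustrative only: a per-client stats walk under struct_mutex. */
	static void example_print_client_stats(struct seq_file *m,
					       struct drm_device *dev,
					       struct drm_file *file)
	{
		struct file_stats stats;

		mutex_lock(&dev->struct_mutex);	/* protects the objects' VMA lists */

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;

		spin_lock(&file->table_lock);
		/* per_file_stats() now asserts struct_mutex via lockdep_assert_held() */
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);

		print_file_stats(m, "client", stats);	/* stats passed by value */

		mutex_unlock(&dev->struct_mutex);
	}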

drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -546,11 +546,12 @@ repeat:
 }
 
 static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+i915_gem_execbuffer_relocate_entry(struct i915_vma *vma,
 				   struct eb_vmas *eb,
 				   struct drm_i915_gem_relocation_entry *reloc,
 				   struct reloc_cache *cache)
 {
+	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct drm_gem_object *target_obj;
 	struct drm_i915_gem_object *target_i915_obj;
@@ -628,6 +629,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
+	/*
+	 * If we write into the object, we need to force the synchronisation
+	 * barrier, either with an asynchronous clflush or if we executed the
+	 * patching using the GPU (though that should be serialised by the
+	 * timeline). To be completely sure, and since we are required to
+	 * do relocations we are already stalling, disable the user's opt
+	 * out of our synchronisation.
+	 */
+	vma->exec_entry->flags &= ~EXEC_OBJECT_ASYNC;
+
 	ret = relocate_entry(obj, reloc, cache, target_offset);
 	if (ret)
 		return ret;
@@ -678,7 +689,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
 		do {
 			u64 offset = r->presumed_offset;
 
-			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
+			ret = i915_gem_execbuffer_relocate_entry(vma, eb, r, &cache);
 			if (ret)
 				goto out;
 
@@ -726,7 +737,7 @@ i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
 	reloc_cache_init(&cache, eb->i915);
 
 	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
+		ret = i915_gem_execbuffer_relocate_entry(vma, eb, &relocs[i], &cache);
 		if (ret)
 			break;
 	}
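
The flag cleared above is the userspace opt-out of implicit synchronisation used for ANDROID_native_sync style explicit fencing. Below is a hypothetical userspace sketch (assumed helper name; the uapi structures and ioctl are standard, but this code is not part of the patch) of how a buffer requests EXEC_OBJECT_ASYNC; if that same buffer also carries relocations, the hunk above drops the flag because the kernel must stall to patch the buffer anyway.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Hypothetical helper: submit a batch with one buffer opting out of
	 * implicit sync. If objs[0] had relocation_count > 0, i915 would
	 * clear EXEC_OBJECT_ASYNC for it while applying the relocations. */
	static int submit_with_async_bo(int drm_fd, uint32_t bo, uint32_t batch)
	{
		struct drm_i915_gem_exec_object2 objs[2];
		struct drm_i915_gem_execbuffer2 execbuf;

		memset(objs, 0, sizeof(objs));
		objs[0].handle = bo;
		objs[0].flags = EXEC_OBJECT_ASYNC;	/* skip implicit fencing */
		objs[1].handle = batch;			/* batch buffer listed last */

		memset(&execbuf, 0, sizeof(execbuf));
		execbuf.buffers_ptr = (uintptr_t)objs;
		execbuf.buffer_count = 2;

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
	}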

drivers/gpu/drm/i915/i915_vma.c

@@ -650,6 +650,11 @@ int i915_vma_unbind(struct i915_vma *vma)
 				break;
 		}
 
+		if (!ret) {
+			ret = i915_gem_active_retire(&vma->last_fence,
+						     &vma->vm->i915->drm.struct_mutex);
+		}
+
 		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
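
The added block waits on the VMA's fence tracker in addition to its per-engine read trackers, so no pending fence register update can still reference the VMA once it is evicted. A simplified sketch of that retire-everything-before-unpin ordering follows (assumed helper name; in the real driver this logic sits inline in i915_vma_unbind() and only walks the engines with active reads):

	/* Illustrative only: retire all activity on a VMA, including the
	 * fence tracker, before it may be unpinned and unbound. */
	static int example_retire_vma_activity(struct i915_vma *vma)
	{
		struct mutex *lock = &vma->vm->i915->drm.struct_mutex;
		int idx, ret = 0;

		for (idx = 0; idx < I915_NUM_ENGINES && !ret; idx++)
			ret = i915_gem_active_retire(&vma->last_read[idx], lock);

		if (!ret)	/* the addition in this patch */
			ret = i915_gem_active_retire(&vma->last_fence, lock);

		return ret;
	}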