drm/amdgpu: fix bo list handling in CS

We didn't initialize the mutex in the cloned bo list, resulting in nice
warnings from lockdep. Also fixes the error handling in this function.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Christian König 2015-08-04 11:54:48 +02:00 committed by Alex Deucher
parent 351dba7369
commit 34cb581a7d
3 changed files with 37 additions and 56 deletions
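For reference, the lockdep warning mentioned in the commit message is the usual symptom of taking a mutex that was never passed through mutex_init(): the old CS path allocated a second amdgpu_bo_list with kzalloc() and only copied fields into it, so its embedded lock had no initialized state. Below is a minimal sketch of that pattern and of the fix; the struct and function names (demo_list, demo_clone_broken, demo_clone_fixed) are hypothetical and not part of the patch.

#include <linux/mutex.h>
#include <linux/slab.h>

/* Stand-in for a structure that embeds a mutex, like amdgpu_bo_list. */
struct demo_list {
	struct mutex lock;
	unsigned int num_entries;
};

/*
 * Broken pattern: the copy is never passed to mutex_init(), so the first
 * mutex_lock(&copy->lock) typically triggers a lockdep report such as
 * "INFO: trying to register non-static key".
 */
static struct demo_list *demo_clone_broken(const struct demo_list *src)
{
	struct demo_list *copy = kzalloc(sizeof(*copy), GFP_KERNEL);

	if (!copy)
		return NULL;
	copy->num_entries = src->num_entries;	/* copy->lock left uninitialized */
	return copy;
}

/* Fixed pattern, mirroring what amdgpu_bo_list_clone() does below. */
static struct demo_list *demo_clone_fixed(const struct demo_list *src)
{
	struct demo_list *copy = kmalloc(sizeof(*copy), GFP_KERNEL);

	if (!copy)
		return NULL;
	mutex_init(&copy->lock);
	copy->num_entries = src->num_entries;
	return copy;
}

The error-handling half of the fix shows up in the amdgpu_cs.c hunk at the end: the reference taken by amdgpu_bo_list_get() is now dropped right after cloning, so it is no longer leaked when the allocation fails and the function returns -ENOMEM.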

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1077,12 +1077,11 @@ struct amdgpu_bo_list {
 	struct amdgpu_bo_list_entry *array;
 };
 
+struct amdgpu_bo_list *
+amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_copy(struct amdgpu_device *adev,
-			 struct amdgpu_bo_list *dst,
-			 struct amdgpu_bo_list *src);
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
 
 /*

drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -62,6 +62,39 @@ static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
 	return 0;
 }
 
+struct amdgpu_bo_list *
+amdgpu_bo_list_clone(struct amdgpu_bo_list *list)
+{
+	struct amdgpu_bo_list *result;
+	unsigned i;
+
+	result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+	if (!result)
+		return NULL;
+
+	result->array = drm_calloc_large(list->num_entries,
+					 sizeof(struct amdgpu_bo_list_entry));
+	if (!result->array) {
+		kfree(result);
+		return NULL;
+	}
+
+	mutex_init(&result->lock);
+	result->gds_obj = list->gds_obj;
+	result->gws_obj = list->gws_obj;
+	result->oa_obj = list->oa_obj;
+	result->has_userptr = list->has_userptr;
+	result->num_entries = list->num_entries;
+	memcpy(result->array, list->array, list->num_entries *
+	       sizeof(struct amdgpu_bo_list_entry));
+
+	for (i = 0; i < result->num_entries; ++i)
+		amdgpu_bo_ref(result->array[i].robj);
+
+	return result;
+}
+
 static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
 	struct amdgpu_bo_list *list;
@@ -166,56 +199,6 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
 	mutex_unlock(&list->lock);
 }
 
-void amdgpu_bo_list_copy(struct amdgpu_device *adev,
-			 struct amdgpu_bo_list *dst,
-			 struct amdgpu_bo_list *src)
-{
-	struct amdgpu_bo_list_entry *array;
-	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
-	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
-	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-	bool has_userptr = false;
-	unsigned i;
-
-	array = drm_calloc_large(src->num_entries, sizeof(struct amdgpu_bo_list_entry));
-	if (!array)
-		return;
-	memset(array, 0, src->num_entries * sizeof(struct amdgpu_bo_list_entry));
-
-	for (i = 0; i < src->num_entries; ++i) {
-		memcpy(array, src->array,
-		       src->num_entries * sizeof(struct amdgpu_bo_list_entry));
-		array[i].robj = amdgpu_bo_ref(src->array[i].robj);
-
-		if (amdgpu_ttm_tt_has_userptr(array[i].robj->tbo.ttm)) {
-			has_userptr = true;
-			array[i].prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
-			array[i].allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
-		}
-
-		array[i].tv.bo = &array[i].robj->tbo;
-		array[i].tv.shared = true;
-
-		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
-			gds_obj = array[i].robj;
-		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
-			gws_obj = array[i].robj;
-		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_OA)
-			oa_obj = array[i].robj;
-	}
-
-	for (i = 0; i < dst->num_entries; ++i)
-		amdgpu_bo_unref(&dst->array[i].robj);
-
-	drm_free_large(dst->array);
-	dst->gds_obj = gds_obj;
-	dst->gws_obj = gws_obj;
-	dst->oa_obj = oa_obj;
-	dst->has_userptr = has_userptr;
-	dst->array = array;
-	dst->num_entries = src->num_entries;
-}
-
 void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
 {
 	unsigned i;

drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -186,11 +186,10 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	}
 	bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
 	if (bo_list && !bo_list->has_userptr) {
-		p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+		p->bo_list = amdgpu_bo_list_clone(bo_list);
+		amdgpu_bo_list_put(bo_list);
 		if (!p->bo_list)
 			return -ENOMEM;
-		amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
-		amdgpu_bo_list_put(bo_list);
 	} else if (bo_list && bo_list->has_userptr)
 		p->bo_list = bo_list;
 	else