
drm/amdgpu: use embedded gem object

Drop drm_gem_object from amdgpu_bo, use the
ttm_buffer_object.base instead.

Build tested only.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190805140119.7337-6-kraxel@redhat.com
Gerd Hoffmann 2019-08-05 16:01:07 +02:00
parent ce77038fda
commit c105de2828
6 changed files with 12 additions and 13 deletions
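
Before the diffs, a quick illustration of what the change relies on: with this series the drm_gem_object lives inside ttm_buffer_object as its base member, so amdgpu_bo no longer needs its own gem_base field and gem_to_amdgpu_bo() can resolve the containing BO through tbo.base. The stand-alone sketch below is userspace-only and simplified (the struct bodies and the flags field are placeholders, not the real kernel layouts); it just shows the container_of pattern the updated macro uses.

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of, equivalent to the kernel's for this purpose. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object { size_t size; };                   /* placeholder body */
struct ttm_buffer_object { struct drm_gem_object base; }; /* placeholder body */
struct amdgpu_bo {
        int flags;                       /* placeholder field */
        struct ttm_buffer_object tbo;    /* embedded TTM BO, as in the driver */
};

/* The old macro resolved through the dropped gem_base member;
 * the new one resolves through the embedded tbo.base instead. */
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)

int main(void)
{
        struct amdgpu_bo bo = { .flags = 42 };
        struct drm_gem_object *gobj = &bo.tbo.base;

        /* Round-trip: a GEM object pointer maps back to its amdgpu_bo. */
        printf("flags = %d\n", gem_to_amdgpu_bo(gobj)->flags);
        return 0;
}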


@@ -393,7 +393,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
                 bo->prime_shared_count = 1;
         reservation_object_unlock(resv);
-        return &bo->gem_base;
+        return &bo->tbo.base;
 error:
         reservation_object_unlock(resv);


@@ -85,7 +85,7 @@ retry:
                 }
                 return r;
         }
-        *obj = &bo->gem_base;
+        *obj = &bo->tbo.base;
         return 0;
 }
@@ -689,7 +689,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                 struct drm_amdgpu_gem_create_in info;
                 void __user *out = u64_to_user_ptr(args->value);
-                info.bo_size = robj->gem_base.size;
+                info.bo_size = robj->tbo.base.size;
                 info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                 info.domains = robj->preferred_domains;
                 info.domain_flags = robj->flags;
@@ -819,8 +819,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
         if (pin_count)
                 seq_printf(m, " pin count %d", pin_count);
-        dma_buf = READ_ONCE(bo->gem_base.dma_buf);
-        attachment = READ_ONCE(bo->gem_base.import_attach);
+        dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+        attachment = READ_ONCE(bo->tbo.base.import_attach);
         if (attachment)
                 seq_printf(m, " imported from %p", dma_buf);


@@ -31,7 +31,7 @@
  */
 #define AMDGPU_GEM_DOMAIN_MAX 0x3
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
 void amdgpu_gem_object_free(struct drm_gem_object *obj);
 int amdgpu_gem_object_open(struct drm_gem_object *obj,


@@ -85,9 +85,9 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
         amdgpu_bo_kunmap(bo);
-        if (bo->gem_base.import_attach)
-                drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-        drm_gem_object_release(&bo->gem_base);
+        if (bo->tbo.base.import_attach)
+                drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+        drm_gem_object_release(&bo->tbo.base);
         /* in case amdgpu_device_recover_vram got NULL of bo->parent */
         if (!list_empty(&bo->shadow_list)) {
                 mutex_lock(&adev->shadow_list_lock);
@@ -454,7 +454,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
         if (bo == NULL)
                 return -ENOMEM;
-        drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
+        drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
         INIT_LIST_HEAD(&bo->shadow_list);
         bo->vm_bo = NULL;
         bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
@@ -509,7 +509,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
         if (unlikely(r != 0))
                 return r;
-        bo->gem_base.resv = bo->tbo.resv;
+        bo->tbo.base.resv = bo->tbo.resv;
         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
             bo->tbo.mem.mem_type == TTM_PL_VRAM &&


@@ -94,7 +94,6 @@ struct amdgpu_bo {
         /* per VM structure for page tables and with virtual addresses */
         struct amdgpu_vm_bo_base *vm_bo;
         /* Constant after initialization */
-        struct drm_gem_object gem_base;
         struct amdgpu_bo *parent;
         struct amdgpu_bo *shadow;


@@ -227,7 +227,7 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
         if (amdgpu_ttm_tt_get_usermm(bo->ttm))
                 return -EPERM;
-        return drm_vma_node_verify_access(&abo->gem_base.vma_node,
+        return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
                                           filp->private_data);
 }