
drm/ttm: initialize globals during device init (v2)

Make sure that the global BO state is always correctly initialized.

This allows removing all the per-driver code that initialized it.

v2: fix up vbox (Alex)

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 2018-10-19 16:55:26 +02:00 committed by Alex Deucher
parent 62b53b37e4
commit a64f784bb1
27 changed files with 17 additions and 521 deletions
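To make the repetitive hunks below easier to read, here is a sketch (not part of the patch) of the boilerplate each driver carried before this change and the single call that replaces it. The names driver_device, driver_bo_driver, driver_ttm_global_init and driver_mm_init are placeholders for the amdgpu, ast, bochs, cirrus, hibmc, mgag200, nouveau, qxl, radeon, virtio, vmwgfx and vbox equivalents, and the "before" half relies on the ttm_bo_global_ref helpers that the final hunk removes, so the two halves belong to the pre- and post-patch trees respectively.

#include <drm/drm_global.h>
#include <drm/drm_device.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Placeholder offset; the drivers in this patch each define it themselves. */
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

/* Hypothetical per-driver structure, for illustration only. */
struct driver_device {
	struct drm_device *dev;
	struct ttm_bo_global_ref bo_global_ref;	/* no longer needed after this patch */
	struct ttm_bo_device bdev;
};

static struct ttm_bo_driver driver_bo_driver;	/* callbacks omitted */

/* Before: every driver registered the shared BO global by hand. */
static int driver_ttm_global_init(struct driver_device *ddev)
{
	struct drm_global_reference *global_ref = &ddev->bo_global_ref.ref;

	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_ref_init;
	global_ref->release = &ttm_bo_global_ref_release;
	return drm_global_item_ref(global_ref);
}

/* After: ttm_bo_device_init() references the global itself, so the helper
 * above and the glob argument both go away.
 */
static int driver_mm_init(struct driver_device *ddev)
{
	return ttm_bo_device_init(&ddev->bdev, &driver_bo_driver,
				  ddev->dev->anon_inode->i_mapping,
				  DRM_FILE_PAGE_OFFSET, false);
}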


@ -61,56 +61,6 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
/*
* Global memory.
*/
/**
* amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
*
* @adev: AMDGPU device for which the global structures need to be registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
*/
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
struct drm_global_reference *global_ref;
int r;
/* ensure reference is false in case init fails */
adev->mman.mem_global_referenced = false;
global_ref = &adev->mman.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
goto error_bo;
}
mutex_init(&adev->mman.gtt_window_lock);
adev->mman.mem_global_referenced = true;
return 0;
error_bo:
return r;
}
static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
if (adev->mman.mem_global_referenced) {
mutex_destroy(&adev->mman.gtt_window_lock);
drm_global_item_unref(&adev->mman.bo_global_ref.ref);
adev->mman.mem_global_referenced = false;
}
}
static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@ -1714,14 +1664,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
int r;
u64 vis_vram_limit;
/* initialize global references for vram/gtt */
r = amdgpu_ttm_global_init(adev);
if (r) {
return r;
}
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev,
adev->mman.bo_global_ref.ref.object,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -1878,7 +1824,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}


@ -39,7 +39,6 @@
#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
struct amdgpu_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;


@ -104,7 +104,6 @@ struct ast_private {
int fb_mtrr;
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;


@ -36,35 +36,6 @@ ast_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct ast_private, ttm.bdev);
}
static int ast_ttm_global_init(struct ast_private *ast)
{
struct drm_global_reference *global_ref;
int r;
global_ref = &ast->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
return 0;
}
static void
ast_ttm_global_release(struct ast_private *ast)
{
if (ast->ttm.bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
ast->ttm.bo_global_ref.ref.release = NULL;
}
static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct ast_bo *bo;
@ -204,12 +175,7 @@ int ast_mm_init(struct ast_private *ast)
struct drm_device *dev = ast->dev;
struct ttm_bo_device *bdev = &ast->ttm.bdev;
ret = ast_ttm_global_init(ast);
if (ret)
return ret;
ret = ttm_bo_device_init(&ast->ttm.bdev,
ast->ttm.bo_global_ref.ref.object,
&ast_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -240,8 +206,6 @@ void ast_mm_fini(struct ast_private *ast)
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));


@ -76,7 +76,6 @@ struct bochs_device {
/* ttm */
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;
} ttm;


@ -16,35 +16,6 @@ static inline struct bochs_device *bochs_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct bochs_device, ttm.bdev);
}
static int bochs_ttm_global_init(struct bochs_device *bochs)
{
struct drm_global_reference *global_ref;
int r;
global_ref = &bochs->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
return 0;
}
static void bochs_ttm_global_release(struct bochs_device *bochs)
{
if (bochs->ttm.bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&bochs->ttm.bo_global_ref.ref);
bochs->ttm.bo_global_ref.ref.release = NULL;
}
static void bochs_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct bochs_bo *bo;
@ -182,12 +153,7 @@ int bochs_mm_init(struct bochs_device *bochs)
struct ttm_bo_device *bdev = &bochs->ttm.bdev;
int ret;
ret = bochs_ttm_global_init(bochs);
if (ret)
return ret;
ret = ttm_bo_device_init(&bochs->ttm.bdev,
bochs->ttm.bo_global_ref.ref.object,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -214,7 +180,6 @@ void bochs_mm_fini(struct bochs_device *bochs)
return;
ttm_bo_device_release(&bochs->ttm.bdev);
bochs_ttm_global_release(bochs);
bochs->ttm.initialized = false;
}


@ -136,7 +136,6 @@ struct cirrus_device {
int fb_mtrr;
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;
bool mm_inited;


@ -36,35 +36,6 @@ cirrus_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct cirrus_device, ttm.bdev);
}
static int cirrus_ttm_global_init(struct cirrus_device *cirrus)
{
struct drm_global_reference *global_ref;
int r;
global_ref = &cirrus->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
return 0;
}
static void
cirrus_ttm_global_release(struct cirrus_device *cirrus)
{
if (cirrus->ttm.bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&cirrus->ttm.bo_global_ref.ref);
cirrus->ttm.bo_global_ref.ref.release = NULL;
}
static void cirrus_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct cirrus_bo *bo;
@ -204,12 +175,7 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
struct drm_device *dev = cirrus->dev;
struct ttm_bo_device *bdev = &cirrus->ttm.bdev;
ret = cirrus_ttm_global_init(cirrus);
if (ret)
return ret;
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
cirrus->ttm.bo_global_ref.ref.object,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -245,8 +211,6 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
ttm_bo_device_release(&cirrus->ttm.bdev);
cirrus_ttm_global_release(cirrus);
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),


@ -49,7 +49,6 @@ struct hibmc_drm_private {
bool mode_config_initialized;
/* ttm */
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool initialized;


@ -29,32 +29,6 @@ hibmc_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct hibmc_drm_private, bdev);
}
static int hibmc_ttm_global_init(struct hibmc_drm_private *hibmc)
{
int ret;
hibmc->bo_global_ref.ref.global_type = DRM_GLOBAL_TTM_BO;
hibmc->bo_global_ref.ref.size = sizeof(struct ttm_bo_global);
hibmc->bo_global_ref.ref.init = &ttm_bo_global_ref_init;
hibmc->bo_global_ref.ref.release = &ttm_bo_global_ref_release;
ret = drm_global_item_ref(&hibmc->bo_global_ref.ref);
if (ret) {
DRM_ERROR("failed setting up TTM BO subsystem: %d\n", ret);
return ret;
}
return 0;
}
static void
hibmc_ttm_global_release(struct hibmc_drm_private *hibmc)
{
if (hibmc->bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&hibmc->bo_global_ref.ref);
hibmc->bo_global_ref.ref.release = NULL;
}
static void hibmc_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct hibmc_bo *bo = container_of(tbo, struct hibmc_bo, bo);
@ -214,18 +188,12 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
struct drm_device *dev = hibmc->dev;
struct ttm_bo_device *bdev = &hibmc->bdev;
ret = hibmc_ttm_global_init(hibmc);
if (ret)
return ret;
ret = ttm_bo_device_init(&hibmc->bdev,
hibmc->bo_global_ref.ref.object,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
hibmc_ttm_global_release(hibmc);
DRM_ERROR("error initializing bo driver: %d\n", ret);
return ret;
}
@ -233,7 +201,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
hibmc->fb_size >> PAGE_SHIFT);
if (ret) {
hibmc_ttm_global_release(hibmc);
DRM_ERROR("failed ttm VRAM init: %d\n", ret);
return ret;
}
@ -248,7 +215,6 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
return;
ttm_bo_device_release(&hibmc->bdev);
hibmc_ttm_global_release(hibmc);
hibmc->mm_inited = false;
}


@ -212,7 +212,6 @@ struct mga_device {
int fb_mtrr;
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;


@ -36,35 +36,6 @@ mgag200_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct mga_device, ttm.bdev);
}
static int mgag200_ttm_global_init(struct mga_device *ast)
{
struct drm_global_reference *global_ref;
int r;
global_ref = &ast->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
return 0;
}
static void
mgag200_ttm_global_release(struct mga_device *ast)
{
if (ast->ttm.bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
ast->ttm.bo_global_ref.ref.release = NULL;
}
static void mgag200_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct mgag200_bo *bo;
@ -204,12 +175,7 @@ int mgag200_mm_init(struct mga_device *mdev)
struct drm_device *dev = mdev->dev;
struct ttm_bo_device *bdev = &mdev->ttm.bdev;
ret = mgag200_ttm_global_init(mdev);
if (ret)
return ret;
ret = ttm_bo_device_init(&mdev->ttm.bdev,
mdev->ttm.bo_global_ref.ref.object,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -240,8 +206,6 @@ void mgag200_mm_fini(struct mga_device *mdev)
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);


@ -146,7 +146,6 @@ struct nouveau_drm {
/* TTM interface support */
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,


@ -174,38 +174,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
struct drm_global_reference *global_ref;
int ret;
global_ref = &drm->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM BO subsystem\n");
drm->ttm.bo_global_ref.ref.release = NULL;
return ret;
}
return 0;
}
void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
if (drm->ttm.bo_global_ref.ref.release == NULL)
return;
drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
drm->ttm.bo_global_ref.ref.release = NULL;
}
static int
nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
{
@ -268,12 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
drm->agp.cma = pci->agp.cma;
}
ret = nouveau_ttm_global_init(drm);
if (ret)
return ret;
ret = ttm_bo_device_init(&drm->ttm.bdev,
drm->ttm.bo_global_ref.ref.object,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -328,8 +291,6 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
ttm_bo_device_release(&drm->ttm.bdev);
nouveau_ttm_global_release(drm);
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
arch_io_free_memtype_wc(device->func->resource_addr(device, 1),


@ -127,8 +127,6 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
struct ttm_bo_global_ref bo_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev;
};


@ -46,34 +46,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
static int qxl_ttm_global_init(struct qxl_device *qdev)
{
struct drm_global_reference *global_ref;
int r;
global_ref = &qdev->mman.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
qdev->mman.mem_global_referenced = true;
return 0;
}
static void qxl_ttm_global_fini(struct qxl_device *qdev)
{
if (qdev->mman.mem_global_referenced) {
drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
qdev->mman.mem_global_referenced = false;
}
}
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;
@ -345,12 +317,8 @@ int qxl_ttm_init(struct qxl_device *qdev)
int r;
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
r = qxl_ttm_global_init(qdev);
if (r)
return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&qdev->mman.bdev,
qdev->mman.bo_global_ref.ref.object,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@ -386,7 +354,6 @@ void qxl_ttm_fini(struct qxl_device *qdev)
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV);
ttm_bo_device_release(&qdev->mman.bdev);
qxl_ttm_global_fini(qdev);
DRM_INFO("qxl: ttm finalized\n");
}


@ -448,9 +448,7 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
#if defined(CONFIG_DEBUG_FS)


@ -60,39 +60,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
/*
* Global memory.
*/
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
struct drm_global_reference *global_ref;
int r;
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
rdev->mman.mem_global_referenced = true;
return 0;
}
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
if (rdev->mman.mem_global_referenced) {
drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
rdev->mman.mem_global_referenced = false;
}
}
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@ -821,13 +788,8 @@ int radeon_ttm_init(struct radeon_device *rdev)
{
int r;
r = radeon_ttm_global_init(rdev);
if (r) {
return r;
}
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&rdev->mman.bdev,
rdev->mman.bo_global_ref.ref.object,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET,
@ -899,7 +861,6 @@ void radeon_ttm_fini(struct radeon_device *rdev)
ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
ttm_bo_device_release(&rdev->mman.bdev);
radeon_gart_fini(rdev);
radeon_ttm_global_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
}


@ -1530,7 +1530,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
kfree(glob);
}
void ttm_bo_global_release(void)
static void ttm_bo_global_release(void)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
@ -1544,9 +1544,8 @@ void ttm_bo_global_release(void)
out:
mutex_unlock(&ttm_global_mutex);
}
EXPORT_SYMBOL(ttm_bo_global_release);
int ttm_bo_global_init(void)
static int ttm_bo_global_init(void)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret = 0;
@ -1583,8 +1582,6 @@ out:
mutex_unlock(&ttm_global_mutex);
return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);
int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
@ -1623,18 +1620,25 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
drm_vma_offset_manager_destroy(&bdev->vma_manager);
if (!ret)
ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
int ret = -EINVAL;
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
ret = ttm_bo_global_init();
if (ret)
return ret;
bdev->driver = driver;
@ -1661,6 +1665,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
return 0;
out_no_sys:
ttm_bo_global_release();
return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
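For completeness, a matching sketch of the teardown side, reusing the hypothetical names from the sketch after the commit message: the reference that ttm_bo_device_init() now takes on ttm_bo_glob is dropped again by ttm_bo_device_release(), and per the hunk above only when the release found all memory managers clean.

static void driver_mm_fini(struct driver_device *ddev)
{
	/*
	 * Drops the ttm_bo_glob reference taken in ttm_bo_device_init();
	 * when the last TTM device goes away the global state is freed.
	 */
	ttm_bo_device_release(&ddev->bdev);
}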


@ -142,8 +142,6 @@ struct virtio_gpu_fbdev {
};
struct virtio_gpu_mman {
struct ttm_bo_global_ref bo_global_ref;
bool mem_global_referenced;
struct ttm_bo_device bdev;
};


@ -50,35 +50,6 @@ virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
return vgdev;
}
static int virtio_gpu_ttm_global_init(struct virtio_gpu_device *vgdev)
{
struct drm_global_reference *global_ref;
int r;
vgdev->mman.mem_global_referenced = false;
global_ref = &vgdev->mman.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return r;
}
vgdev->mman.mem_global_referenced = true;
return 0;
}
static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
{
if (vgdev->mman.mem_global_referenced) {
drm_global_item_unref(&vgdev->mman.bo_global_ref.ref);
vgdev->mman.mem_global_referenced = false;
}
}
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@ -356,12 +327,8 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
{
int r;
r = virtio_gpu_ttm_global_init(vgdev);
if (r)
return r;
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&vgdev->mman.bdev,
vgdev->mman.bo_global_ref.ref.object,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, 0);
@ -380,13 +347,11 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
err_mm_init:
ttm_bo_device_release(&vgdev->mman.bdev);
err_dev_init:
virtio_gpu_ttm_global_fini(vgdev);
return r;
}
void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev)
{
ttm_bo_device_release(&vgdev->mman.bdev);
virtio_gpu_ttm_global_fini(vgdev);
DRM_INFO("virtio_gpu: ttm finalized\n");
}


@ -801,11 +801,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
dev_priv->mmio_start, dev_priv->mmio_size / 1024);
ret = vmw_ttm_global_init(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
vmw_master_init(&dev_priv->fbdev_master);
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
@ -816,7 +811,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
DRM_ERROR("Failed mapping MMIO.\n");
goto out_err3;
goto out_err0;
}
/* Need mmio memory to check for fifo pitchlock cap. */
@ -870,7 +865,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
ret = ttm_bo_device_init(&dev_priv->bdev,
dev_priv->bo_global_ref.ref.object,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
VMWGFX_FILE_PAGE_OFFSET,
@ -992,8 +986,6 @@ out_no_device:
ttm_object_device_release(&dev_priv->tdev);
out_err4:
memunmap(dev_priv->mmio_virt);
out_err3:
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
@ -1045,7 +1037,6 @@ static void vmw_driver_unload(struct drm_device *dev)
memunmap(dev_priv->mmio_virt);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);


@ -417,7 +417,6 @@ enum {
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
struct vmw_fifo_state fifo;
@ -841,8 +840,6 @@ extern int vmw_fifo_flush(struct vmw_private *dev_priv,
* TTM glue - vmwgfx_ttm_glue.c
*/
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**


@ -42,30 +42,3 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM buffer objects.\n");
goto out_no_bo;
}
return 0;
out_no_bo:
return ret;
}
void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
}


@ -99,7 +99,6 @@ struct vbox_private {
int fb_mtrr;
struct {
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
} ttm;


@ -35,37 +35,6 @@ static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
return container_of(bd, struct vbox_private, ttm.bdev);
}
/**
* Adds the vbox memory manager object/structures to the global memory manager.
*/
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
struct drm_global_reference *global_ref;
int ret;
global_ref = &vbox->ttm.bo_global_ref.ref;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_ref_init;
global_ref->release = &ttm_bo_global_ref_release;
ret = drm_global_item_ref(global_ref);
if (ret) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
return ret;
}
return 0;
}
/**
* Removes the vbox memory manager object from the global memory manager.
*/
static void vbox_ttm_global_release(struct vbox_private *vbox)
{
drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
}
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
struct vbox_bo *bo;
@ -203,18 +172,13 @@ int vbox_mm_init(struct vbox_private *vbox)
struct drm_device *dev = &vbox->ddev;
struct ttm_bo_device *bdev = &vbox->ttm.bdev;
ret = vbox_ttm_global_init(vbox);
if (ret)
return ret;
ret = ttm_bo_device_init(&vbox->ttm.bdev,
vbox->ttm.bo_global_ref.ref.object,
&vbox_bo_driver,
dev->anon_inode->i_mapping,
DRM_FILE_PAGE_OFFSET, true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
goto err_ttm_global_release;
return ret;
}
ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
@ -236,8 +200,6 @@ int vbox_mm_init(struct vbox_private *vbox)
err_device_release:
ttm_bo_device_release(&vbox->ttm.bdev);
err_ttm_global_release:
vbox_ttm_global_release(vbox);
return ret;
}
@ -251,7 +213,6 @@ void vbox_mm_fini(struct vbox_private *vbox)
arch_phys_wc_del(vbox->fb_mtrr);
#endif
ttm_bo_device_release(&vbox->ttm.bdev);
vbox_ttm_global_release(vbox);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain)


@ -569,9 +569,6 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
void ttm_bo_global_release(void);
int ttm_bo_global_init(void);
int ttm_bo_device_release(struct ttm_bo_device *bdev);
/**
@ -589,7 +586,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* Returns:
* !0: Failure.
*/
int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_global *glob,
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
uint64_t file_page_offset, bool need_dma32);
@ -888,40 +885,4 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
/**
* struct ttm_bo_global_ref - Argument to initialize a struct ttm_bo_global.
*/
struct ttm_bo_global_ref {
struct drm_global_reference ref;
};
/**
* ttm_bo_global_ref_init
*
* @ref: DRM global reference
*
* Helper function that initializes a struct ttm_bo_global. This function
* is used as init call-back function for DRM global references of type
* DRM_GLOBAL_TTM_BO_REF.
*/
static inline int ttm_bo_global_ref_init(struct drm_global_reference *ref)
{
return ttm_bo_global_init();
}
/**
* ttm_bo_global_ref_release
*
* @ref: DRM global reference
*
* Helper function that releases a struct ttm_bo_global. This function
* is used as release call-back function for DRM global references of type
* DRM_GLOBAL_TTM_BO_REF.
*/
static inline void ttm_bo_global_ref_release(struct drm_global_reference *ref)
{
ttm_bo_global_release();
}
#endif