diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a405b75a90d9..d9e7c4023677 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -196,8 +196,8 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	int lpg_shift = 12;
 	int max_size;
 
-	if (drm->client.base.vm)
-		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+	if (drm->client.vm)
+		lpg_shift = drm->client.vm->vmm->lpg_shift;
 	max_size = INT_MAX & ~((1 << lpg_shift) - 1);
 
 	if (size <= 0 || size > max_size) {
@@ -219,9 +219,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
 	nvbo->bo.bdev = &drm->ttm.bdev;
 
 	nvbo->page_shift = 12;
-	if (drm->client.base.vm) {
+	if (drm->client.vm) {
 		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
-			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
+			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
 	}
 
 	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
@@ -929,12 +929,12 @@ nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
 	int ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
 	if (ret)
 		return ret;
 
-	ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
 			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
 	if (ret) {
 		nouveau_vm_put(&old_node->vma[0]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 64ec1d34b484..fe99765d9fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -415,9 +415,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	if (device->card_type >= NV_50) {
 		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
-				     0x1000, &drm->client.base.vm);
+				     0x1000, &drm->client.vm);
 		if (ret)
 			goto fail_device;
+
+		drm->client.base.vm = drm->client.vm;
 	}
 
 	ret = nouveau_ttm_init(drm);
@@ -725,11 +727,13 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 
 	if (nv_device(drm->device)->card_type >= NV_50) {
 		ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
-				     0x1000, &cli->base.vm);
+				     0x1000, &cli->vm);
 		if (ret) {
 			nouveau_cli_destroy(cli);
 			goto out_suspend;
 		}
+
+		cli->base.vm = cli->vm;
 	}
 
 	fpriv->driver_priv = cli;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 029e9b6568bd..ab2060767ba9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -65,6 +65,7 @@ enum nouveau_drm_handle {
 
 struct nouveau_cli {
 	struct nouveau_client base;
+	struct nouveau_vm *vm; /*XXX*/
 	struct list_head head;
 	struct mutex mutex;
 	void *abi16;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df9d451afdcd..abb7321b35c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -58,14 +58,14 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->base.vm)
+	if (!cli->vm)
 		return 0;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return ret;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 	if (!vma) {
 		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 		if (!vma) {
@@ -73,7 +73,7 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 			goto out;
 		}
 
-		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 		if (ret) {
 			kfree(vma);
 			goto out;
@@ -129,14 +129,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 	struct nouveau_vma *vma;
 	int ret;
 
-	if (!cli->base.vm)
+	if (!cli->vm)
 		return;
 
 	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
 	if (ret)
 		return;
 
-	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 	if (vma) {
 		if (--vma->refcount == 0)
 			nouveau_gem_object_unmap(nvbo, vma);
@@ -202,8 +202,8 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
 	rep->offset = nvbo->bo.offset;
 
-	if (cli->base.vm) {
-		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (cli->vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->vm);
 		if (!vma)
 			return -EINVAL;
 
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 9fd475c89820..7103a771aa87 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -140,7 +140,7 @@ int
 nv84_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nouveau_fifo_chan *fifo = (void *)chan->object;
-	struct nouveau_client *client = nouveau_client(fifo);
+	struct nouveau_cli *cli = chan->cli;
 	struct nv84_fence_priv *priv = chan->drm->fence;
 	struct nv84_fence_chan *fctx;
 	int ret, i;
@@ -156,16 +156,16 @@ nv84_fence_context_new(struct nouveau_channel *chan)
 	fctx->base.emit32 = nv84_fence_emit32;
 	fctx->base.sync32 = nv84_fence_sync32;
 
-	ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
+	ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
 	if (ret == 0) {
-		ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
+		ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
 					 &fctx->vma_gart);
 	}
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
 		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
-		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
+		ret = nouveau_bo_vma_add(bo, cli->vm, &fctx->dispc_vma[i]);
 	}
 
 	nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
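
For context, the access-path change above can be reduced to the following standalone sketch of the shadow-pointer pattern: the wrapper struct keeps its own vm field alongside the embedded base object, both are set when the vm is created, and callers read the wrapper's copy directly. This is a minimal illustration only; the struct and function names (vm, client_base, cli, cli_set_vm) are simplified stand-ins, not the real nouveau types.

	/* Minimal sketch of the pattern, not nouveau code. */
	#include <stdio.h>

	struct vm {                  /* stands in for struct nouveau_vm      */
		int lpg_shift;
	};

	struct client_base {         /* stands in for struct nouveau_client  */
		struct vm *vm;
	};

	struct cli {                 /* stands in for struct nouveau_cli     */
		struct client_base base;
		struct vm *vm;       /* new: wrapper-level copy of the vm    */
	};

	static void cli_set_vm(struct cli *c, struct vm *vm)
	{
		c->vm = vm;          /* callers now use c->vm ...            */
		c->base.vm = vm;     /* ... while base.vm is kept in sync    */
	}

	int main(void)
	{
		struct vm vm = { .lpg_shift = 17 };
		struct cli c = { { 0 }, 0 };

		cli_set_vm(&c, &vm);

		/* equivalent of the cli->base.vm -> cli->vm access change */
		if (c.vm)
			printf("lpg_shift = %d\n", c.vm->lpg_shift);
		return 0;
	}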