
drm/amdgpu: sync bo and shadow V3

Use the shadow flag to decide in which direction to sync.

V2:
Bo pinning is not needed, so remove it.

V3:
1. Split into two functions: one is backup_to_shadow, the other is
restore_from_shadow.
2. Clean up the previous shadow direction definitions.
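
A minimal caller sketch (not part of this patch) of how the split pair is meant to be used. The buffer_funcs_ring choice, the fence handling, and the assumption that the BO is already reserved and placed are mine:

/* Sketch only: round-trip a BO through its shadow. Assumes the caller
 * holds the BO reservation and that both BO and shadow have valid GPU
 * addresses; error handling is abbreviated.
 */
static int example_shadow_roundtrip(struct amdgpu_device *adev,
				    struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct fence *fence = NULL;
	int r;

	/* BO -> shadow; no extra fences to sync to in this sketch. */
	r = amdgpu_bo_backup_to_shadow(adev, ring, bo, NULL, &fence, false);
	if (r)
		return r;
	fence_wait(fence, false);	/* wait for the copy, then drop the ref */
	fence_put(fence);

	/* ... later, e.g. after the VRAM copy of the BO was lost ... */

	/* shadow -> BO */
	r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL, &fence, false);
	if (r)
		return r;
	fence_wait(fence, false);
	fence_put(fence);
	return 0;
}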

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Chunming Zhou 2016-08-04 16:51:18 +08:00 committed by Alex Deucher
parent e24db98529
commit 20f4eff1c8
3 changed files with 76 additions and 8 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -438,12 +438,6 @@ struct amdgpu_bo_va {
 
 #define AMDGPU_GEM_DOMAIN_MAX		0x3
 
-enum amdgpu_bo_shadow {
-	AMDGPU_BO_SHADOW_TO_NONE = 0,
-	AMDGPU_BO_SHADOW_TO_PARENT,
-	AMDGPU_BO_SHADOW_TO_SHADOW,
-};
-
 struct amdgpu_bo {
 	/* Protected by gem.mutex */
 	struct list_head		list;
@@ -470,8 +464,6 @@ struct amdgpu_bo {
 	struct drm_gem_object		gem_base;
 	struct amdgpu_bo		*parent;
 	struct amdgpu_bo		*shadow;
-	/* indicate if need to sync between bo and shadow */
-	enum amdgpu_bo_shadow		backup_shadow;
 	struct ttm_bo_kmap_obj		dma_buf_vmap;
 	struct amdgpu_mn		*mn;

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c

@@ -445,6 +445,70 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	return r;
 }
 
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence,
+			       bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct)
+{
+	struct amdgpu_bo *shadow = bo->shadow;
+	uint64_t bo_addr, shadow_addr;
+	int r;
+
+	if (!shadow)
+		return -EINVAL;
+
+	bo_addr = amdgpu_bo_gpu_offset(bo);
+	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		goto err;
+
+	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
+			       amdgpu_bo_size(bo), resv, fence,
+			       direct);
+	if (!r)
+		amdgpu_bo_fence(bo, *fence, true);
+
+err:
+	return r;
+}
+
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 {
 	bool is_iomem;
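
Design note: the two new functions are deliberately symmetric; the only difference is the source/destination order handed to amdgpu_copy_buffer. A hypothetical shared helper (the name and the to_shadow flag are mine, not the patch's) makes that visible:

/* Hypothetical refactor, not in the patch: one helper covering both
 * copy directions between a BO and its shadow.
 */
static int amdgpu_bo_copy_shadow(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring,
				 struct amdgpu_bo *bo,
				 struct reservation_object *resv,
				 struct fence **fence,
				 bool direct, bool to_shadow)
{
	uint64_t bo_addr, shadow_addr, src, dst;
	int r;

	if (!bo->shadow)
		return -EINVAL;

	bo_addr = amdgpu_bo_gpu_offset(bo);
	shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

	/* Pick the copy direction; everything else is identical. */
	src = to_shadow ? bo_addr : shadow_addr;
	dst = to_shadow ? shadow_addr : bo_addr;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = amdgpu_copy_buffer(ring, src, dst, amdgpu_bo_size(bo),
			       resv, fence, direct);
	if (!r)
		amdgpu_bo_fence(bo, *fence, true);
	return r;
}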

drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

@@ -155,6 +155,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 		     bool shared);
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
+int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring,
+			       struct amdgpu_bo *bo,
+			       struct reservation_object *resv,
+			       struct fence **fence, bool direct);
+int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
+				  struct amdgpu_ring *ring,
+				  struct amdgpu_bo *bo,
+				  struct reservation_object *resv,
+				  struct fence **fence,
+				  bool direct);
+
 /*
  * sub allocation
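
Both copy paths call reservation_object_reserve_shared() on bo->tbo.resv, so the BO must be reserved when they run. A hedged sketch of that locking, assuming the stock amdgpu_bo_reserve()/amdgpu_bo_unreserve() helpers; the function name is mine:

/* Sketch only: take the BO reservation around a shadow restore. */
static int example_restore_locked(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring,
				  struct amdgpu_bo *bo,
				  struct fence **fence)
{
	int r;

	/* The copy functions reserve a shared fence slot on
	 * bo->tbo.resv, so the BO must be reserved first. */
	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = amdgpu_bo_restore_from_shadow(adev, ring, bo, NULL,
					  fence, false);

	amdgpu_bo_unreserve(bo);
	return r;
}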