
drm/msm: Split msm_gem_get_iova into two steps

Split the operation of msm_gem_get_iova into two operations:
1) allocate an iova, and 2) map (pin) the backing memory into the
IOMMU. This is the first step toward allowing memory pinning
to occur independently of the iova management.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Jordan Crouse 2018-11-07 15:35:48 -07:00 committed by Rob Clark
parent 70dc51b447
commit c0ee979469
4 changed files with 86 additions and 41 deletions
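
For orientation, a minimal sketch of how the two new steps compose. It restates the new msm_gem_get_iova() from the msm_gem.c diff below; the wrapper name example_get_and_pin is illustrative and not part of the patch:

/* Illustrative composition of the two steps introduced by this patch */
static int example_get_and_pin(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	/* Step 1: look up (or create) the vma and reserve an iova for it */
	ret = msm_gem_get_iova_locked(obj, aspace, &local);

	/* Step 2: pin the backing pages and map them into the IOMMU */
	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}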

drivers/gpu/drm/msm/msm_drv.h

@@ -241,6 +241,8 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages);
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,

drivers/gpu/drm/msm/msm_gem.c

@@ -357,52 +357,76 @@ put_iova(struct drm_gem_object *obj)
 	}
 }
 
-/* get iova, taking a reference.  Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj,
+static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;
 
-	mutex_lock(&msm_obj->lock);
-
-	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
-		return -EBUSY;
-	}
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	vma = lookup_vma(obj, aspace);
-
 	if (!vma) {
-		struct page **pages;
-
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
-			goto unlock;
-		}
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
 
-		pages = get_pages(obj);
-		if (IS_ERR(pages)) {
-			ret = PTR_ERR(pages);
-			goto fail;
+		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+		if (ret) {
+			del_vma(vma);
+			return ret;
 		}
-
-		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT);
-		if (ret)
-			goto fail;
 	}
 
 	*iova = vma->iova;
-	mutex_unlock(&msm_obj->lock);
 
 	return 0;
+}
+
+static int msm_gem_pin_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct msm_gem_vma *vma;
+	struct page **pages;
+
+	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+
+	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
+		return -EBUSY;
+
+	vma = lookup_vma(obj, aspace);
+	if (WARN_ON(!vma))
+		return -EINVAL;
+
+	pages = get_pages(obj);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	return msm_gem_map_vma(aspace, vma, msm_obj->sgt,
+			obj->size >> PAGE_SHIFT);
+}
+
+/* get iova, taking a reference.  Should have a matching put */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+		struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	u64 local;
+	int ret;
+
+	mutex_lock(&msm_obj->lock);
+
+	ret = msm_gem_get_iova_locked(obj, aspace, &local);
+
+	if (!ret)
+		ret = msm_gem_pin_iova(obj, aspace);
+
+	if (!ret)
+		*iova = local;
 
-fail:
-	del_vma(vma);
-unlock:
 	mutex_unlock(&msm_obj->lock);
+
 	return ret;
 }

drivers/gpu/drm/msm/msm_gem.h

@@ -41,6 +41,7 @@ struct msm_gem_vma {
 	uint64_t iova;
 	struct msm_gem_address_space *aspace;
 	struct list_head list;   /* node in msm_gem_object::vmas */
+	bool mapped;
 };
 
 struct msm_gem_object {

drivers/gpu/drm/msm/msm_gem_vma.c

@@ -55,6 +55,7 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 	spin_unlock(&aspace->lock);
 
 	vma->iova = 0;
+	vma->mapped = false;
 
 	msm_gem_address_space_put(aspace);
 }
@@ -62,15 +63,38 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 int
 msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+	unsigned size = npages << PAGE_SHIFT;
+	int ret = 0;
+
+	if (WARN_ON(!vma->iova))
+		return -EINVAL;
+
+	if (vma->mapped)
+		return 0;
+
+	vma->mapped = true;
+
+	if (aspace->mmu)
+		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+				size, IOMMU_READ | IOMMU_WRITE);
+
+	if (ret)
+		vma->mapped = false;
+
+	return ret;
+}
+
+/* Initialize a new vma and allocate an iova for it */
+int msm_gem_init_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, int npages)
 {
 	int ret;
 
-	spin_lock(&aspace->lock);
-	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
-		spin_unlock(&aspace->lock);
-		return 0;
-	}
+	if (WARN_ON(vma->iova))
+		return -EBUSY;
 
+	spin_lock(&aspace->lock);
 	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
 	spin_unlock(&aspace->lock);
 
@@ -78,17 +102,11 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
 		return ret;
 
 	vma->iova = vma->node.start << PAGE_SHIFT;
+	vma->mapped = false;
 
-	if (aspace->mmu) {
-		unsigned size = npages << PAGE_SHIFT;
-		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
-				size, IOMMU_READ | IOMMU_WRITE);
-	}
-
 	/* Get a reference to the aspace to keep it around */
 	kref_get(&aspace->kref);
 
-	return ret;
+	return 0;
 }
 
 struct msm_gem_address_space *
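
A short illustrative sequence (not part of the commit) showing how the new mapped flag behaves across the msm_gem_vma.c functions above; error handling is elided:

	ret = msm_gem_init_vma(aspace, vma, npages);     /* reserves vma->iova, mapped initialized to false */
	ret = msm_gem_map_vma(aspace, vma, sgt, npages); /* maps into the IOMMU, mapped = true */
	ret = msm_gem_map_vma(aspace, vma, sgt, npages); /* already mapped: returns 0 without re-mapping */
	msm_gem_unmap_vma(aspace, vma);                  /* unmaps, clears vma->iova and mapped */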