drm/exynos: stop using sgtable in page fault handler
The struct exynos_drm_gem_buf already holds the buffer's pages array once the buffer is created, so the page fault handler can use those pages directly; we no longer have to build an sg table for the buffer. This, however, requires constructing the pages array for buffers imported via dma-buf PRIME. Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com> Signed-off-by: Inki Dae <inki.dae@samsung.com>
This commit is contained in:
parent
2b8376c803
commit
8139951cae
|
@ -90,23 +90,12 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
|
|||
}
|
||||
}
|
||||
|
||||
buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
|
||||
if (IS_ERR(buf->sgt)) {
|
||||
DRM_ERROR("failed to get sg table.\n");
|
||||
ret = PTR_ERR(buf->sgt);
|
||||
goto err_free_attrs;
|
||||
}
|
||||
|
||||
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
|
||||
(unsigned long)buf->dma_addr,
|
||||
buf->size);
|
||||
|
||||
return ret;
|
||||
|
||||
err_free_attrs:
|
||||
dma_free_attrs(dev->dev, buf->size, buf->pages,
|
||||
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
|
||||
buf->dma_addr = (dma_addr_t)NULL;
|
||||
err_free:
|
||||
if (!is_drm_iommu_supported(dev))
|
||||
drm_free_large(buf->pages);
|
||||
|
@ -126,11 +115,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
|
|||
(unsigned long)buf->dma_addr,
|
||||
buf->size);
|
||||
|
||||
sg_free_table(buf->sgt);
|
||||
|
||||
kfree(buf->sgt);
|
||||
buf->sgt = NULL;
|
||||
|
||||
if (!is_drm_iommu_supported(dev)) {
|
||||
dma_free_attrs(dev->dev, buf->size, buf->cookie,
|
||||
(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
|
||||
|
|
|
@ -203,6 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
|
|||
struct scatterlist *sgl;
|
||||
struct exynos_drm_gem_obj *exynos_gem_obj;
|
||||
struct exynos_drm_gem_buf *buffer;
|
||||
int npages;
|
||||
int ret;
|
||||
|
||||
/* is this one of own objects? */
|
||||
|
@ -251,6 +252,20 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
|
|||
buffer->size = dma_buf->size;
|
||||
buffer->dma_addr = sg_dma_address(sgl);
|
||||
|
||||
npages = dma_buf->size >> PAGE_SHIFT;
|
||||
buffer->pages = drm_malloc_ab(npages, sizeof(struct page *));
|
||||
if (!buffer->pages) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_gem;
|
||||
}
|
||||
|
||||
ret = drm_prime_sg_to_page_addr_arrays(sgt, buffer->pages, NULL,
|
||||
npages);
|
||||
if (ret < 0) {
|
||||
drm_free_large(buffer->pages);
|
||||
goto err_free_gem;
|
||||
}
|
||||
|
||||
if (sgt->nents == 1) {
|
||||
/* always physically continuous memory if sgt->nents is 1. */
|
||||
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
|
||||
|
@ -273,6 +288,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
|
|||
|
||||
return &exynos_gem_obj->base;
|
||||
|
||||
err_free_gem:
|
||||
drm_gem_object_release(&exynos_gem_obj->base);
|
||||
kfree(exynos_gem_obj);
|
||||
err_free_buffer:
|
||||
kfree(buffer);
|
||||
buffer = NULL;
|
||||
|
|
|
@ -83,26 +83,14 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
|
|||
{
|
||||
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
|
||||
struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
|
||||
struct scatterlist *sgl;
|
||||
unsigned long pfn;
|
||||
int i;
|
||||
|
||||
if (!buf->sgt)
|
||||
return -EINTR;
|
||||
|
||||
if (page_offset >= (buf->size >> PAGE_SHIFT)) {
|
||||
DRM_ERROR("invalid page offset\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sgl = buf->sgt->sgl;
|
||||
for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
|
||||
if (page_offset < (sgl->length >> PAGE_SHIFT))
|
||||
break;
|
||||
page_offset -= (sgl->length >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
|
||||
pfn = page_to_pfn(buf->pages[page_offset]);
|
||||
|
||||
return vm_insert_mixed(vma, f_vaddr, pfn);
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue