
dma_buf: remove device parameter from attach callback v2

The device parameter is completely unused because it is available in the
attachment structure as well.

v2: fix kerneldoc as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/226643/
hifive-unleashed-5.1
Christian König 2018-05-28 11:47:52 +02:00 committed by Christian König
parent 2227a7a219
commit a19741e5e5
10 changed files with 13 additions and 18 deletions
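For context on the mechanical change repeated in every file below: the importing device that used to arrive as a separate parameter is already recorded in the attachment by dma_buf_attach(), so a callback that still needs it can read attach->dev. A minimal sketch of an exporter callback with the new two-argument signature (my_attach() is a hypothetical name, not part of this patch):

#include <linux/device.h>
#include <linux/dma-buf.h>

static int my_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	/* The device that used to be passed as "dev" is still available here. */
	struct device *dev = attach->dev;

	dev_dbg(dev, "attaching to dma-buf of size %zu\n", dmabuf->size);
	return 0;
}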

View File

@@ -568,7 +568,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
mutex_lock(&dmabuf->lock);
if (dmabuf->ops->attach) {
-ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ret = dmabuf->ops->attach(dmabuf, attach);
if (ret)
goto err_attach;
}

View File

@@ -127,7 +127,6 @@ error:
}
static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
-struct device *target_dev,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
@@ -135,7 +134,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
long r;
-r = drm_gem_map_attach(dma_buf, target_dev, attach);
+r = drm_gem_map_attach(dma_buf, attach);
if (r)
return r;

View File

@@ -186,7 +186,6 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
/**
* drm_gem_map_attach - dma_buf attach implementation for GEM
* @dma_buf: buffer to attach device to
-* @target_dev: not used
* @attach: buffer attachment data
*
* Allocates &drm_prime_attachment and calls &drm_driver.gem_prime_pin for
@@ -195,7 +194,7 @@ static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpri
*
* Returns 0 on success, negative error code on failure.
*/
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_prime_attachment *prime_attach;
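The helper documented above is what a GEM-based exporter typically plugs into its &dma_buf_ops; after this patch its prototype matches the two-argument @attach callback directly. A brief sketch of that wiring (my_prime_dmabuf_ops is a hypothetical name; the other mandatory callbacks are omitted here):

#include <drm/drm_prime.h>
#include <linux/dma-buf.h>

static const struct dma_buf_ops my_prime_dmabuf_ops = {
	/* drm_gem_map_attach() now has the reduced two-argument prototype. */
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	/* .map_dma_buf, .unmap_dma_buf, .release etc. omitted from this sketch */
};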

View File

@@ -29,7 +29,6 @@ struct udl_drm_dmabuf_attachment {
};
static int udl_attach_dma_buf(struct dma_buf *dmabuf,
-struct device *dev,
struct dma_buf_attachment *attach)
{
struct udl_drm_dmabuf_attachment *udl_attach;

View File

@@ -40,7 +40,6 @@
*/
static int vmw_prime_map_attach(struct dma_buf *dma_buf,
-struct device *target_dev,
struct dma_buf_attachment *attach)
{
return -ENOSYS;

View File

@@ -222,7 +222,7 @@ struct vb2_dc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dc_attachment *attach;

View File

@@ -371,7 +371,7 @@ struct vb2_dma_sg_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_dma_sg_attachment *attach;

View File

@@ -209,7 +209,7 @@ struct vb2_vmalloc_attachment {
enum dma_data_direction dma_dir;
};
-static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
struct dma_buf_attachment *dbuf_attach)
{
struct vb2_vmalloc_attachment *attach;

View File

@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);

View File

@@ -55,11 +55,11 @@ struct dma_buf_ops {
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
-* &device can access the provided &dma_buf. Exporters which support
-* buffer objects in special locations like VRAM or device-specific
-* carveout areas should check whether the buffer could be move to
-* system memory (or directly accessed by the provided device), and
-* otherwise need to fail the attach operation.
+* &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+* which support buffer objects in special locations like VRAM or
+* device-specific carveout areas should check whether the buffer could
+* be move to system memory (or directly accessed by the provided
+* device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
* allocation fullfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
-int (*attach)(struct dma_buf *, struct device *,
-struct dma_buf_attachment *);
+int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
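The @attach documentation above asks exporters that place buffers in VRAM or carveout memory to reject importers that cannot reach the backing storage; with this patch that decision is based on &dma_buf_attachment.dev rather than a separate parameter. A hedged sketch of such a check (carveout_attach(), struct carveout_buffer and its fields are hypothetical and not taken from this patch):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

struct carveout_buffer {
	dma_addr_t paddr;	/* start of the fixed carveout backing this buffer */
	size_t size;
};

static int carveout_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attach)
{
	struct carveout_buffer *buf = dmabuf->priv;

	/*
	 * Backing storage is already allocated; fail the attach when it lies
	 * outside the DMA mask of the requesting device (attach->dev).
	 */
	if (buf->paddr + buf->size - 1 > dma_get_mask(attach->dev))
		return -EBUSY;

	return 0;
}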