
drm/exynos: add drm_iommu_attach_device_if_possible()

Every CRTC driver in Exynos DRM implements its own code to check
whether the IOMMU is supported and, if so, to enable it. Add a new
helper to generalize this logic across the CRTC drivers.

Signed-off-by: Hyungwon Hwang <human.hwang@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Hyungwon Hwang 2015-06-22 19:05:04 +09:00 committed by Inki Dae
parent 3f46d807f8
commit fc2e013f78
6 changed files with 47 additions and 46 deletions
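For reference, below is a minimal sketch of the call pattern the converted drivers now share. It is not part of the patch: example_crtc_bind() is a hypothetical caller, and the priv->pipe rollback mirrors what the fimd and mixer hunks below do on failure.

#include "exynos_drm_drv.h"
#include "exynos_drm_iommu.h"

/*
 * Hypothetical caller illustrating the consolidated attach path.
 * drm_iommu_attach_device_if_possible() clears any active hardware
 * channels via crtc->ops->clear_channels() before attaching the
 * device to the IOMMU mapping, and returns 0 when IOMMU support is
 * not available.
 */
static int example_crtc_bind(struct exynos_drm_crtc *crtc,
			     struct drm_device *drm_dev,
			     struct device *dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret;

	ret = drm_iommu_attach_device_if_possible(crtc, drm_dev, dev);
	if (ret)
		priv->pipe--;	/* roll back the pipe taken earlier in bind */

	return ret;
}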


@@ -89,8 +89,9 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 		DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
-static void decon_clear_channel(struct decon_context *ctx)
+static void decon_clear_channels(struct exynos_drm_crtc *crtc)
 {
+	struct decon_context *ctx = crtc->ctx;
 	unsigned int win, ch_enabled = 0;
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -120,27 +121,16 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 			struct drm_device *drm_dev)
 {
 	struct exynos_drm_private *priv = drm_dev->dev_private;
+	int ret;
 	ctx->drm_dev = drm_dev;
 	ctx->pipe = priv->pipe++;
-	/* attach this sub driver to iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev)) {
-		int ret;
+	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, ctx->dev);
+	if (ret)
+		priv->pipe--;
-		/*
-		 * If any channel is already active, iommu will throw
-		 * a PAGE FAULT when enabled. So clear any channel if enabled.
-		 */
-		decon_clear_channel(ctx);
-		ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
-		if (ret) {
-			DRM_ERROR("drm_iommu_attach failed.\n");
-			return ret;
-		}
-	}
-	return 0;
+	return ret;
 }
 static void decon_ctx_remove(struct decon_context *ctx)
@@ -633,6 +623,7 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
 	.wait_for_vblank = decon_wait_for_vblank,
 	.win_commit = decon_win_commit,
 	.win_disable = decon_win_disable,
+	.clear_channels = decon_clear_channels,
 };


@@ -177,6 +177,7 @@ struct exynos_drm_crtc_ops {
 	void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
 	void (*te_handler)(struct exynos_drm_crtc *crtc);
 	void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
+	void (*clear_channels)(struct exynos_drm_crtc *crtc);
 };
 /*


@@ -298,8 +298,9 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
 	writel(val, ctx->regs + SHADOWCON);
 }
-static void fimd_clear_channel(struct fimd_context *ctx)
+static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
 {
+	struct fimd_context *ctx = crtc->ctx;
 	unsigned int win, ch_enabled = 0;
 	DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -347,30 +348,6 @@ static void fimd_clear_channel(struct fimd_context *ctx)
 	pm_runtime_put(ctx->dev);
 }
-static int fimd_iommu_attach_devices(struct fimd_context *ctx,
-			struct drm_device *drm_dev)
-{
-	/* attach this sub driver to iommu mapping if supported. */
-	if (is_drm_iommu_supported(ctx->drm_dev)) {
-		int ret;
-		/*
-		 * If any channel is already active, iommu will throw
-		 * a PAGE FAULT when enabled. So clear any channel if enabled.
-		 */
-		fimd_clear_channel(ctx);
-		ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev);
-		if (ret) {
-			DRM_ERROR("drm_iommu_attach failed.\n");
-			return ret;
-		}
-	}
-	return 0;
-}
 static void fimd_iommu_detach_devices(struct fimd_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
@@ -917,6 +894,7 @@ static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
 	.win_disable = fimd_win_disable,
 	.te_handler = fimd_te_handler,
 	.clock_enable = fimd_dp_clock_enable,
+	.clear_channels = fimd_clear_channels,
 };
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -986,7 +964,11 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 	if (ctx->display)
 		exynos_drm_create_enc_conn(drm_dev, ctx->display);
-	return fimd_iommu_attach_devices(ctx, drm_dev);
+	ret = drm_iommu_attach_device_if_possible(ctx->crtc, drm_dev, dev);
+	if (ret)
+		priv->pipe--;
+	return ret;
 }
 static void fimd_unbind(struct device *dev, struct device *master,


@@ -144,3 +144,17 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
 	iommu_detach_device(mapping->domain, subdrv_dev);
 	drm_release_iommu_mapping(drm_dev);
 }
+int drm_iommu_attach_device_if_possible(struct exynos_drm_crtc *exynos_crtc,
+			struct drm_device *drm_dev, struct device *subdrv_dev)
+{
+	int ret = 0;
+	if (is_drm_iommu_supported(drm_dev)) {
+		if (exynos_crtc->ops->clear_channels)
+			exynos_crtc->ops->clear_channels(exynos_crtc);
+		return drm_iommu_attach_device(drm_dev, subdrv_dev);
+	}
+	return ret;
+}


@@ -38,6 +38,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 #endif
 }
+int drm_iommu_attach_device_if_possible(
+		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
+		struct device *subdrv_dev);
 #else
 static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
@@ -65,5 +69,12 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 	return false;
 }
+static inline int drm_iommu_attach_device_if_possible(
+		struct exynos_drm_crtc *exynos_crtc, struct drm_device *drm_dev,
+		struct device *subdrv_dev)
+{
+	return 0;
+}
 #endif
 #endif


@@ -882,10 +882,12 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
-	if (!is_drm_iommu_supported(mixer_ctx->drm_dev))
-		return 0;
+	ret = drm_iommu_attach_device_if_possible(mixer_ctx->crtc, drm_dev,
+					mixer_ctx->dev);
+	if (ret)
+		priv->pipe--;
-	return drm_iommu_attach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
+	return ret;
 }
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)