
MLK-15925-3 drm/imx: dpu: kms: Avoid plane src hot migration between 2 disps

The DPU fetch units (backing DRM planes) are shared by the two displays
(a.k.a. CRTCs).  Since the shadow trigger/load mechanism of each display
(CRTC) is independent of the other, on-the-fly/hot migration of a plane
source between the two displays is likely to cause a resource conflict
when the shadow registers are loaded.  This patch changes the way we
assign fetch units to DRM planes so that such migrations are avoided.
Thanks to the nature of the DRM atomic check, cold migrations can still
be supported.
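
As an illustration only, here is a minimal standalone sketch of the
first-fit selection this patch adds in
dpu_atomic_assign_plane_source_per_crtc(); the array, helper and
constants below are made up for the example and are not the driver's
API:

/*
 * Minimal model of the constraint: a fetch unit may be picked for a
 * plane only if it is not already claimed in this atomic check
 * (src_mask) and is not currently feeding the other display stream.
 * All names and data here are illustrative, not the driver's.
 */
#include <stdio.h>

#define NUM_FETCH_UNITS 4
#define SRC_DISABLED    (-1)

/* stream each fetch unit currently feeds, as read back from hardware */
static int current_stream[NUM_FETCH_UNITS] = { 0, SRC_DISABLED, 1, SRC_DISABLED };

/* first-fit pick of a fetch unit for a plane on display 'sid'; -1 if none */
static int assign_fetch_unit(unsigned int *src_mask, int sid)
{
        int k;

        for (k = 0; k < NUM_FETCH_UNITS; k++) {
                if (*src_mask & (1U << k))      /* taken earlier in this check */
                        continue;
                if (current_stream[k] != SRC_DISABLED &&
                    current_stream[k] != sid)   /* would hot-migrate between displays */
                        continue;
                *src_mask |= 1U << k;
                return k;
        }
        return -1;
}

int main(void)
{
        unsigned int src_mask = 0;

        /* fd0 still feeds stream 0, fd2 feeds stream 1, fd1/fd3 are idle */
        printf("plane on stream 0 -> fd%d\n", assign_fetch_unit(&src_mask, 0));
        printf("plane on stream 1 -> fd%d\n", assign_fetch_unit(&src_mask, 1));
        return 0;
}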

Signed-off-by: Liu Ying <victor.liu@nxp.com>
Liu Ying 2017-07-07 17:41:34 +08:00 committed by Jason Liu
parent 9b0d0c0779
commit 24f036984b
4 changed files with 62 additions and 7 deletions


@@ -299,11 +299,18 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct dpu_plane *dplane = to_dpu_plane(crtc->primary);
struct dpu_plane_res *res = &dplane->grp->res;
struct dpu_extdst *ed = res->ed[dplane->stream_id];
int i;
if (!crtc->state->enable && !old_crtc_state->enable)
return;
extdst_pixengcfg_sync_trigger(ed);
for (i = 0; i < ARRAY_SIZE(res->fd); i++) {
if (res->fd[i] && !fetchdecode_is_enabled(res->fd[i]))
fetchdecode_set_stream_id(res->fd[i],
DPU_PLANE_SRC_DISABLED);
}
}
static void dpu_crtc_mode_set_nofb(struct drm_crtc *crtc)


@@ -125,31 +125,60 @@ dpu_atomic_set_top_plane_per_crtc(struct drm_plane_state **states, int n)
}
}
static void
static int
dpu_atomic_assign_plane_source_per_crtc(struct drm_plane_state **states, int n)
{
struct dpu_plane_state *dpstate;
struct dpu_plane *dplane;
unsigned int sid;
int i, j;
struct dpu_plane_grp *grp;
struct dpu_fetchdecode *fd;
unsigned int sid, src_sid;
int i, j, k;
int fd_id;
/* for active planes only */
for (i = 0; i < n; i++) {
dpstate = to_dpu_plane_state(states[i]);
dplane = to_dpu_plane(states[i]->plane);
grp = dplane->grp;
sid = dplane->stream_id;
/* assign source */
for (k = 0; k < grp->hw_plane_num; k++) {
/* already used by others? */
if (grp->src_mask & BIT(k))
continue;
fd_id = source_to_id(sources[k]);
fd = grp->res.fd[fd_id];
/* avoid on-the-fly/hot migration */
src_sid = fetchdecode_get_stream_id(fd);
if (src_sid && src_sid != BIT(sid))
continue;
grp->src_mask |= BIT(k);
break;
}
if (k == grp->hw_plane_num)
return -EINVAL;
dpstate->source = sources[k];
/* assign stage and blend */
if (sid) {
j = dplane->grp->hw_plane_num - (n - i);
j = grp->hw_plane_num - (n - i);
dpstate->stage = i ? stages[j - 1] : cf_stages[sid];
dpstate->source = sources[j];
dpstate->blend = blends[j];
} else {
dpstate->stage = i ? stages[i - 1] : cf_stages[sid];
dpstate->source = sources[i];
dpstate->blend = blends[i];
}
}
return 0;
}
static int dpu_drm_atomic_check(struct drm_device *dev,
@@ -190,6 +219,12 @@ static int dpu_drm_atomic_check(struct drm_device *dev,
if (grp[i] && active_plane[i] > grp[i]->hw_plane_num)
return -EINVAL;
/* clear source mask */
for (i = 0; i < MAX_DPU_PLANE_GRP; i++) {
if (grp[i])
grp[i]->src_mask = 0;
}
ret = drm_atomic_helper_check_modeset(dev, state);
if (ret)
return ret;
@@ -232,7 +267,11 @@ static int dpu_drm_atomic_check(struct drm_device *dev,
dpu_atomic_set_top_plane_per_crtc(states, n);
dpu_atomic_assign_plane_source_per_crtc(states, n);
ret = dpu_atomic_assign_plane_source_per_crtc(states, n);
if (ret) {
kfree(states);
return ret;
}
kfree(states);
}


@@ -243,6 +243,9 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
fetchdecode_layerproperty(fd, true);
fetchdecode_framedimensions(fd, src_w, src_h);
fetchdecode_baseaddress(fd, drm_plane_state_to_baseaddr(state));
fetchdecode_set_stream_id(fd, dplane->stream_id ?
DPU_PLANE_SRC_TO_DISP_STREAM1 :
DPU_PLANE_SRC_TO_DISP_STREAM0);
layerblend_pixengcfg_dynamic_prim_sel(lb, dpstate->stage);
layerblend_pixengcfg_dynamic_sec_sel(lb, dpstate->source);


@@ -521,6 +521,12 @@ struct dpu_plane_grp {
struct mutex lock;
unsigned int hw_plane_num;
unsigned int id;
/*
* used when assigning plane source
* index: 0 1 2 3
* source: fd0 fd1 fd2 fd3
*/
u32 src_mask;
};
static inline struct dpu_plane_grp *plane_res_to_grp(struct dpu_plane_res *res)