
Merge commit 'refs/for-upstream/mali-dp' of git://linux-arm.org/linux-ld into drm-next

Picking up the pace on the upstreaming of the Komeda driver, with quite a lot
of new features added this time. On top of that we have some small
cleanups and improved usage of the debugfs functions. Please pull!

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Liviu Dudau <Liviu.Dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621095349.GI17204@e110455-lin.cambridge.arm.com
Dave Airlie 2019-06-25 12:49:08 +10:00
commit 9b75276e4f
25 changed files with 2729 additions and 228 deletions


@ -7,10 +7,13 @@ Required properties:
- clocks: A list of phandle + clock-specifier pairs, one for each entry
in 'clock-names'
- clock-names: A list of clock names. It should contain:
- "mclk": for the main processor clock
- "pclk": for the APB interface clock
- "aclk": for the main processor clock
- #address-cells: Must be 1
- #size-cells: Must be 0
- iommus: configures the stream IDs to the IOMMU. Must be configured if you want
to enable the IOMMU for the display. For how to configure this node, please refer to
devicetree/bindings/iommu/arm,smmu-v3.txt,
devicetree/bindings/iommu/iommu.txt
Required properties for sub-node: pipeline@N
Each device contains one or two pipeline sub-nodes (at least one), each
@ -20,7 +23,6 @@ pipeline node should provide properties:
in 'clock-names'
- clock-names: should contain:
- "pxclk": pixel clock
- "aclk": AXI interface clock
- port: each pipeline connect to an encoder input port. The connection is
modeled using the OF graph bindings specified in
@ -42,12 +44,15 @@ Example:
compatible = "arm,mali-d71";
reg = <0xc00000 0x20000>;
interrupts = <0 168 4>;
clocks = <&dpu_mclk>, <&dpu_aclk>;
clock-names = "mclk", "pclk";
clocks = <&dpu_aclk>;
clock-names = "aclk";
iommus = <&smmu 0>, <&smmu 1>, <&smmu 2>, <&smmu 3>,
<&smmu 4>, <&smmu 5>, <&smmu 6>, <&smmu 7>,
<&smmu 8>, <&smmu 9>;
dp0_pipe0: pipeline@0 {
clocks = <&fpgaosc2>, <&dpu_aclk>;
clock-names = "pxclk", "aclk";
clocks = <&fpgaosc2>;
clock-names = "pxclk";
reg = <0>;
port {
@ -58,8 +63,8 @@ Example:
};
dp0_pipe1: pipeline@1 {
clocks = <&fpgaosc2>, <&dpu_aclk>;
clock-names = "pxclk", "aclk";
clocks = <&fpgaosc2>;
clock-names = "pxclk";
reg = <1>;
port {


@ -21,6 +21,13 @@ malidp_write32(u32 __iomem *base, u32 offset, u32 v)
writel(v, (base + (offset >> 2)));
}
static inline void
malidp_write64(u32 __iomem *base, u32 offset, u64 v)
{
writel(lower_32_bits(v), (base + (offset >> 2)));
writel(upper_32_bits(v), (base + (offset >> 2) + 1));
}
static inline void
malidp_write32_mask(u32 __iomem *base, u32 offset, u32 m, u32 v)
{


@ -8,6 +8,7 @@
#define _MALIDP_UTILS_
#include <linux/delay.h>
#include <linux/errno.h>
#define has_bit(nr, mask) (BIT(nr) & (mask))
#define has_bits(bits, mask) (((bits) & (mask)) == (bits))
@ -20,11 +21,9 @@
int num_tries = __tries; \
while (!__cond && (num_tries > 0)) { \
usleep_range(__min_range, __max_range); \
if (__cond) \
break; \
num_tries--; \
} \
num_tries; \
(__cond) ? 0 : -ETIMEDOUT; \
})
/* the restriction of range is [start, end] */
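With this change dp_wait_cond() evaluates to 0 on success or -ETIMEDOUT on timeout, instead of yielding the remaining retry count. A minimal sketch of a hypothetical caller (modelled on the existing d71 users further down) under the new convention:

static int example_wait_reset_done(u32 __iomem *gcu)
{
	/* poll up to 100 times, sleeping 1000-10000us between polls;
	 * the macro itself now yields 0 or -ETIMEDOUT, so its value
	 * can be returned directly.
	 */
	return dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
			    100, 1000, 10000);
}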


@ -8,12 +8,14 @@ komeda-y := \
komeda_drv.o \
komeda_dev.o \
komeda_format_caps.o \
komeda_color_mgmt.o \
komeda_pipeline.o \
komeda_pipeline_state.o \
komeda_framebuffer.o \
komeda_kms.o \
komeda_crtc.o \
komeda_plane.o \
komeda_wb_connector.o \
komeda_private_obj.o
komeda-y += \


@ -10,6 +10,7 @@
#include "komeda_kms.h"
#include "malidp_io.h"
#include "komeda_framebuffer.h"
#include "komeda_color_mgmt.h"
static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id)
{
@ -134,11 +135,60 @@ static u32 to_rot_ctrl(u32 rot)
return lr_ctrl;
}
static inline u32 to_d71_input_id(struct komeda_component_output *output)
static u32 to_ad_ctrl(u64 modifier)
{
struct komeda_component *comp = output->component;
u32 afbc_ctrl = AD_AEN;
return comp ? (comp->hw_id + output->output_port) : 0;
if (!modifier)
return 0;
if ((modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) ==
AFBC_FORMAT_MOD_BLOCK_SIZE_32x8)
afbc_ctrl |= AD_WB;
if (modifier & AFBC_FORMAT_MOD_YTR)
afbc_ctrl |= AD_YT;
if (modifier & AFBC_FORMAT_MOD_SPLIT)
afbc_ctrl |= AD_BS;
if (modifier & AFBC_FORMAT_MOD_TILED)
afbc_ctrl |= AD_TH;
return afbc_ctrl;
}
static inline u32 to_d71_input_id(struct komeda_component_state *st, int idx)
{
struct komeda_component_output *input = &st->inputs[idx];
/* if input is not active, set hw input_id(0) to disable it */
if (has_bit(idx, st->active_inputs))
return input->component->hw_id + input->output_port;
else
return 0;
}
static void d71_layer_update_fb(struct komeda_component *c,
struct komeda_fb *kfb,
dma_addr_t *addr)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
u32 __iomem *reg = c->reg;
int block_h;
if (info->num_planes > 2)
malidp_write64(reg, BLK_P2_PTR_LOW, addr[2]);
if (info->num_planes > 1) {
block_h = drm_format_info_block_height(info, 1);
malidp_write32(reg, BLK_P1_STRIDE, fb->pitches[1] * block_h);
malidp_write64(reg, BLK_P1_PTR_LOW, addr[1]);
}
block_h = drm_format_info_block_height(info, 0);
malidp_write32(reg, BLK_P0_STRIDE, fb->pitches[0] * block_h);
malidp_write64(reg, BLK_P0_PTR_LOW, addr[0]);
malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
}
static void d71_layer_disable(struct komeda_component *c)
@ -156,26 +206,65 @@ static void d71_layer_update(struct komeda_component *c,
u32 __iomem *reg = c->reg;
u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
u32 ctrl = L_EN | to_rot_ctrl(st->rot);
int i;
for (i = 0; i < fb->format->num_planes; i++) {
malidp_write32(reg,
BLK_P0_PTR_LOW + i * LAYER_PER_PLANE_REGS * 4,
lower_32_bits(st->addr[i]));
malidp_write32(reg,
BLK_P0_PTR_HIGH + i * LAYER_PER_PLANE_REGS * 4,
upper_32_bits(st->addr[i]));
if (i >= 2)
break;
d71_layer_update_fb(c, kfb, st->addr);
malidp_write32(reg,
BLK_P0_STRIDE + i * LAYER_PER_PLANE_REGS * 4,
fb->pitches[i] & 0xFFFF);
malidp_write32(reg, AD_CONTROL, to_ad_ctrl(fb->modifier));
if (fb->modifier) {
u64 addr;
malidp_write32(reg, LAYER_AD_H_CROP, HV_CROP(st->afbc_crop_l,
st->afbc_crop_r));
malidp_write32(reg, LAYER_AD_V_CROP, HV_CROP(st->afbc_crop_t,
st->afbc_crop_b));
/* afbc 1.2 wants payload, afbc 1.0/1.1 wants end_addr */
if (fb->modifier & AFBC_FORMAT_MOD_TILED)
addr = st->addr[0] + kfb->offset_payload;
else
addr = st->addr[0] + kfb->afbc_size - 1;
malidp_write32(reg, BLK_P1_PTR_LOW, lower_32_bits(addr));
malidp_write32(reg, BLK_P1_PTR_HIGH, upper_32_bits(addr));
}
if (fb->format->is_yuv) {
u32 upsampling = 0;
switch (kfb->format_caps->fourcc) {
case DRM_FORMAT_YUYV:
upsampling = fb->modifier ? LR_CHI422_BILINEAR :
LR_CHI422_REPLICATION;
break;
case DRM_FORMAT_UYVY:
upsampling = LR_CHI422_REPLICATION;
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420_8BIT:
case DRM_FORMAT_YUV420_10BIT:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_P010:
/* these formats support both MPEG and JPEG upsampling; prefer JPEG here */
upsampling = LR_CHI420_JPEG;
break;
case DRM_FORMAT_X0L2:
upsampling = LR_CHI420_JPEG;
break;
default:
break;
}
malidp_write32(reg, LAYER_R_CONTROL, upsampling);
malidp_write_group(reg, LAYER_YUV_RGB_COEFF0,
KOMEDA_N_YUV2RGB_COEFFS,
komeda_select_yuv2rgb_coeffs(
plane_st->color_encoding,
plane_st->color_range));
}
malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
if (kfb->is_va)
ctrl |= L_TBU_EN;
malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
}
@ -288,10 +377,90 @@ static int d71_layer_init(struct d71_dev *d71,
return 0;
}
static void d71_wb_layer_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_layer_state *st = to_layer_st(state);
struct drm_connector_state *conn_st = state->wb_conn->state;
struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
u32 ctrl = L_EN | LW_OFM, mask = L_EN | LW_OFM | LW_TBU_EN;
u32 __iomem *reg = c->reg;
d71_layer_update_fb(c, kfb, st->addr);
if (kfb->is_va)
ctrl |= LW_TBU_EN;
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl);
}
static void d71_wb_layer_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[12], i;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 1, v);
seq_printf(sf, "LW_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 3, v);
seq_printf(sf, "LW_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "LW_PROG_LINE:\t\t0x%X\n", v[1]);
seq_printf(sf, "LW_FORMAT:\t\t0x%X\n", v[2]);
get_values_from_reg(c->reg, 0xE0, 1, v);
seq_printf(sf, "LW_IN_SIZE:\t\t0x%X\n", v[0]);
for (i = 0; i < 2; i++) {
get_values_from_reg(c->reg, 0x100 + i * 0x10, 3, v);
seq_printf(sf, "LW_P%u_PTR_LOW:\t\t0x%X\n", i, v[0]);
seq_printf(sf, "LW_P%u_PTR_HIGH:\t\t0x%X\n", i, v[1]);
seq_printf(sf, "LW_P%u_STRIDE:\t\t0x%X\n", i, v[2]);
}
get_values_from_reg(c->reg, 0x130, 12, v);
for (i = 0; i < 12; i++)
seq_printf(sf, "LW_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
}
static void d71_wb_layer_disable(struct komeda_component *c)
{
malidp_write32(c->reg, BLK_INPUT_ID0, 0);
malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
}
static const struct komeda_component_funcs d71_wb_layer_funcs = {
.update = d71_wb_layer_update,
.disable = d71_wb_layer_disable,
.dump_register = d71_wb_layer_dump,
};
static int d71_wb_layer_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
DRM_DEBUG("Detect D71_Wb_Layer.\n");
struct komeda_component *c;
struct komeda_layer *wb_layer;
u32 pipe_id, layer_id;
get_resources_id(blk->block_info, &pipe_id, &layer_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*wb_layer),
layer_id, BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_wb_layer_funcs,
1, get_valid_inputs(blk), 0, reg,
"LPU%d_LAYER_WR", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to add wb_layer component\n");
return PTR_ERR(c);
}
wb_layer = to_layer(c);
wb_layer->layer_type = KOMEDA_FMT_WB_LAYER;
set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size);
set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
return 0;
}
@ -303,8 +472,18 @@ static void d71_component_disable(struct komeda_component *c)
malidp_write32(reg, BLK_CONTROL, 0);
for (i = 0; i < c->max_active_inputs; i++)
for (i = 0; i < c->max_active_inputs; i++) {
malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
/* Besides clearing the input ID to zero, the D71 compiz also has an
input enable bit in CU_INPUTx_CONTROL which needs to be
* cleared.
*/
if (has_bit(c->id, KOMEDA_PIPELINE_COMPIZS))
malidp_write32(reg, CU_INPUT0_CONTROL +
i * CU_PER_INPUT_REGS * 4,
CU_INPUT_CTRL_ALPHA(0xFF));
}
}
static void compiz_enable_input(u32 __iomem *id_reg,
@ -337,15 +516,15 @@ static void d71_compiz_update(struct komeda_component *c,
struct komeda_compiz_state *st = to_compiz_st(state);
u32 __iomem *reg = c->reg;
u32 __iomem *id_reg, *cfg_reg;
u32 index, input_hw_id;
u32 index;
for_each_changed_input(state, index) {
id_reg = reg + index;
cfg_reg = reg + index * CU_PER_INPUT_REGS;
input_hw_id = to_d71_input_id(&state->inputs[index]);
if (state->active_inputs & BIT(index)) {
compiz_enable_input(id_reg, cfg_reg,
input_hw_id, &st->cins[index]);
to_d71_input_id(state, index),
&st->cins[index]);
} else {
malidp_write32(id_reg, BLK_INPUT_ID0, 0);
malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
@ -424,18 +603,354 @@ static int d71_compiz_init(struct d71_dev *d71,
return 0;
}
static void d71_scaler_update_filter_lut(u32 __iomem *reg, u32 hsize_in,
u32 vsize_in, u32 hsize_out,
u32 vsize_out)
{
u32 val = 0;
if (hsize_in <= hsize_out)
val |= 0x62;
else if (hsize_in <= (hsize_out + hsize_out / 2))
val |= 0x63;
else if (hsize_in <= hsize_out * 2)
val |= 0x64;
else if (hsize_in <= hsize_out * 2 + (hsize_out * 3) / 4)
val |= 0x65;
else
val |= 0x66;
if (vsize_in <= vsize_out)
val |= SC_VTSEL(0x6A);
else if (vsize_in <= (vsize_out + vsize_out / 2))
val |= SC_VTSEL(0x6B);
else if (vsize_in <= vsize_out * 2)
val |= SC_VTSEL(0x6C);
else if (vsize_in <= vsize_out * 2 + vsize_out * 3 / 4)
val |= SC_VTSEL(0x6D);
else
val |= SC_VTSEL(0x6E);
malidp_write32(reg, SC_COEFFTAB, val);
}
static void d71_scaler_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_scaler_state *st = to_scaler_st(state);
u32 __iomem *reg = c->reg;
u32 init_ph, delta_ph, ctrl;
d71_scaler_update_filter_lut(reg, st->hsize_in, st->vsize_in,
st->hsize_out, st->vsize_out);
malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize_in, st->vsize_in));
malidp_write32(reg, SC_OUT_SIZE, HV_SIZE(st->hsize_out, st->vsize_out));
malidp_write32(reg, SC_H_CROP, HV_CROP(st->left_crop, st->right_crop));
/* For the right part, the HW only samples the valid pixels, which means
the pixels in left_crop are skipped, and the first sampled pixel is:
*
dst_a = st->total_hsize_out - st->hsize_out + st->left_crop + 0.5;
*
Then the corresponding texel in the source is:
*
h_delta_phase = st->total_hsize_in / st->total_hsize_out;
src_a = dst_a * h_delta_phase;
*
and h_init_phase is src_a minus the real source start src_S:
*
src_S = st->total_hsize_in - st->hsize_in;
h_init_phase = src_a - src_S;
*
The HW precision for the initial/delta phase is 16:16 fixed point;
the following is the simplified formula.
*/
if (st->right_part) {
u32 dst_a = st->total_hsize_out - st->hsize_out + st->left_crop;
if (st->en_img_enhancement)
dst_a -= 1;
init_ph = ((st->total_hsize_in * (2 * dst_a + 1) -
2 * st->total_hsize_out * (st->total_hsize_in -
st->hsize_in)) << 15) / st->total_hsize_out;
} else {
init_ph = (st->total_hsize_in << 15) / st->total_hsize_out;
}
malidp_write32(reg, SC_H_INIT_PH, init_ph);
delta_ph = (st->total_hsize_in << 16) / st->total_hsize_out;
malidp_write32(reg, SC_H_DELTA_PH, delta_ph);
init_ph = (st->total_vsize_in << 15) / st->vsize_out;
malidp_write32(reg, SC_V_INIT_PH, init_ph);
delta_ph = (st->total_vsize_in << 16) / st->vsize_out;
malidp_write32(reg, SC_V_DELTA_PH, delta_ph);
ctrl = 0;
ctrl |= st->en_scaling ? SC_CTRL_SCL : 0;
ctrl |= st->en_alpha ? SC_CTRL_AP : 0;
ctrl |= st->en_img_enhancement ? SC_CTRL_IENH : 0;
/* If we use the hardware splitter we shouldn't set SC_CTRL_LS */
if (st->en_split &&
state->inputs[0].component->id != KOMEDA_COMPONENT_SPLITTER)
ctrl |= SC_CTRL_LS;
malidp_write32(reg, BLK_CONTROL, ctrl);
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
}
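A standalone sketch (not driver code) that plugs assumed numbers into the 16:16 fixed-point phase maths above: a 4096-pixel-wide source scaled to 2048 output pixels, split into two 2048-in/1024-out halves, with no left_crop and no image enhancement:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_hsize_in = 4096, total_hsize_out = 2048;
	uint64_t hsize_in = 2048, hsize_out = 1024, left_crop = 0;

	/* delta phase in 16:16 fixed point: 2.0 source texels per output pixel */
	uint64_t delta_ph = (total_hsize_in << 16) / total_hsize_out;

	/* left part: init phase is 0.5 * delta, i.e. (total_hsize_in << 15) / total_hsize_out */
	uint64_t init_left = (total_hsize_in << 15) / total_hsize_out;

	/* right part: the first sampled destination pixel is dst_a + 0.5 */
	uint64_t dst_a = total_hsize_out - hsize_out + left_crop;
	uint64_t init_right = ((total_hsize_in * (2 * dst_a + 1) -
				2 * total_hsize_out * (total_hsize_in - hsize_in)) << 15) /
			      total_hsize_out;

	/* prints delta_ph=0x20000 init_left=0x10000 init_right=0x10000 */
	printf("delta_ph=0x%llx init_left=0x%llx init_right=0x%llx\n",
	       (unsigned long long)delta_ph, (unsigned long long)init_left,
	       (unsigned long long)init_right);
	return 0;
}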
static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[9];
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, 0x80, 1, v);
seq_printf(sf, "SC_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xD0, 1, v);
seq_printf(sf, "SC_CONTROL:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, 0xDC, 9, v);
seq_printf(sf, "SC_COEFFTAB:\t\t0x%X\n", v[0]);
seq_printf(sf, "SC_IN_SIZE:\t\t0x%X\n", v[1]);
seq_printf(sf, "SC_OUT_SIZE:\t\t0x%X\n", v[2]);
seq_printf(sf, "SC_H_CROP:\t\t0x%X\n", v[3]);
seq_printf(sf, "SC_V_CROP:\t\t0x%X\n", v[4]);
seq_printf(sf, "SC_H_INIT_PH:\t\t0x%X\n", v[5]);
seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]);
seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]);
seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]);
}
static const struct komeda_component_funcs d71_scaler_funcs = {
.update = d71_scaler_update,
.disable = d71_component_disable,
.dump_register = d71_scaler_dump,
};
static int d71_scaler_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_scaler *scaler;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*scaler),
comp_id, BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_scaler_funcs,
1, get_valid_inputs(blk), 1, reg,
"CU%d_SCALER%d",
pipe_id, BLOCK_INFO_BLK_ID(blk->block_info));
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize scaler");
return PTR_ERR(c);
}
scaler = to_scaler(c);
set_range(&scaler->hsize, 4, 2048);
set_range(&scaler->vsize, 4, 4096);
scaler->max_downscaling = 6;
scaler->max_upscaling = 64;
scaler->scaling_split_overlap = 8;
scaler->enh_split_overlap = 1;
malidp_write32(c->reg, BLK_CONTROL, 0);
return 0;
}
static int d71_downscaling_clk_check(struct komeda_pipeline *pipe,
struct drm_display_mode *mode,
unsigned long aclk_rate,
struct komeda_data_flow_cfg *dflow)
{
u32 h_in = dflow->in_w;
u32 v_in = dflow->in_h;
u32 v_out = dflow->out_h;
u64 fraction, denominator;
/* D71 downscaling must satisfy the following equation
*
 ACLK       h_in * v_in
------- >= ---------------------------------------------
 PXLCLK    (h_total - (1 + 2 * v_in / v_out)) * v_out
*
In the horizontal-downscaling-only situation, the right side should be
multiplied by (h_total - 3) / (h_active - 3), and then the equation becomes
*
 ACLK       h_in
------- >= ----------------
 PXLCLK    (h_active - 3)
*
To avoid losing precision, equation 1 is converted to:
*
 ACLK       h_in * v_in
------- >= -----------------------------------
 PXLCLK    (h_total - 1) * v_out - 2 * v_in
*/
if (v_in == v_out) {
fraction = h_in;
denominator = mode->hdisplay - 3;
} else {
fraction = h_in * v_in;
denominator = (mode->htotal - 1) * v_out - 2 * v_in;
}
return aclk_rate * denominator >= mode->clock * 1000 * fraction ?
0 : -EINVAL;
}
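A standalone sketch (illustrative numbers only, not from the patch) plugging a concrete case into the check above: downscaling a 3840x2160 layer to 1920x1080 on a 4K60 mode (pixel clock 594000 kHz, htotal 4400) needs an ACLK of roughly 1.04 GHz, so an assumed 1 GHz engine clock fails the check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t aclk_rate = 1000000000ULL;	/* assumed 1 GHz engine clock */
	uint64_t mode_clock_khz = 594000, htotal = 4400;
	uint64_t h_in = 3840, v_in = 2160, v_out = 1080;

	uint64_t fraction = h_in * v_in;
	uint64_t denominator = (htotal - 1) * v_out - 2 * v_in;

	/* same comparison as d71_downscaling_clk_check() */
	if (aclk_rate * denominator >= mode_clock_khz * 1000 * fraction)
		printf("downscaling fits the ACLK budget\n");
	else
		printf("downscaling exceeds the ACLK budget\n");	/* printed here */
	return 0;
}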
static void d71_splitter_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_splitter_state *st = to_splitter_st(state);
u32 __iomem *reg = c->reg;
malidp_write32(reg, BLK_INPUT_ID0, to_d71_input_id(state, 0));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
malidp_write32(reg, SP_OVERLAP_SIZE, st->overlap & 0x1FFF);
malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
}
static void d71_splitter_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v[3];
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, BLK_INPUT_ID0, 1, v);
seq_printf(sf, "SP_INPUT_ID0:\t\t0x%X\n", v[0]);
get_values_from_reg(c->reg, BLK_CONTROL, 3, v);
seq_printf(sf, "SP_CONTROL:\t\t0x%X\n", v[0]);
seq_printf(sf, "SP_SIZE:\t\t0x%X\n", v[1]);
seq_printf(sf, "SP_OVERLAP_SIZE:\t0x%X\n", v[2]);
}
static const struct komeda_component_funcs d71_splitter_funcs = {
.update = d71_splitter_update,
.disable = d71_component_disable,
.dump_register = d71_splitter_dump,
};
static int d71_splitter_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_splitter *splitter;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*splitter),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_splitter_funcs,
1, get_valid_inputs(blk), 2, reg,
"CU%d_SPLITTER", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize splitter");
return -1;
}
splitter = to_splitter(c);
set_range(&splitter->hsize, 4, d71->max_line_size);
set_range(&splitter->vsize, 4, d71->max_vsize);
return 0;
}
static void d71_merger_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_merger_state *st = to_merger_st(state);
u32 __iomem *reg = c->reg;
u32 index;
for_each_changed_input(state, index)
malidp_write32(reg, MG_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, MG_SIZE, HV_SIZE(st->hsize_merged,
st->vsize_merged));
malidp_write32(reg, BLK_CONTROL, BLK_CTRL_EN);
}
static void d71_merger_dump(struct komeda_component *c, struct seq_file *sf)
{
u32 v;
dump_block_header(sf, c->reg);
get_values_from_reg(c->reg, MG_INPUT_ID0, 1, &v);
seq_printf(sf, "MG_INPUT_ID0:\t\t0x%X\n", v);
get_values_from_reg(c->reg, MG_INPUT_ID1, 1, &v);
seq_printf(sf, "MG_INPUT_ID1:\t\t0x%X\n", v);
get_values_from_reg(c->reg, BLK_CONTROL, 1, &v);
seq_printf(sf, "MG_CONTROL:\t\t0x%X\n", v);
get_values_from_reg(c->reg, MG_SIZE, 1, &v);
seq_printf(sf, "MG_SIZE:\t\t0x%X\n", v);
}
static const struct komeda_component_funcs d71_merger_funcs = {
.update = d71_merger_update,
.disable = d71_component_disable,
.dump_register = d71_merger_dump,
};
static int d71_merger_init(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg)
{
struct komeda_component *c;
struct komeda_merger *merger;
u32 pipe_id, comp_id;
get_resources_id(blk->block_info, &pipe_id, &comp_id);
c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*merger),
comp_id,
BLOCK_INFO_INPUT_ID(blk->block_info),
&d71_merger_funcs,
MG_NUM_INPUTS_IDS, get_valid_inputs(blk),
MG_NUM_OUTPUTS_IDS, reg,
"CU%d_MERGER", pipe_id);
if (IS_ERR(c)) {
DRM_ERROR("Failed to initialize merger.\n");
return PTR_ERR(c);
}
merger = to_merger(c);
set_range(&merger->hsize_merged, 4, 4032);
set_range(&merger->vsize_merged, 4, 4096);
return 0;
}
static void d71_improc_update(struct komeda_component *c,
struct komeda_component_state *state)
{
struct komeda_improc_state *st = to_improc_st(state);
u32 __iomem *reg = c->reg;
u32 index, input_hw_id;
u32 index;
for_each_changed_input(state, index) {
input_hw_id = state->active_inputs & BIT(index) ?
to_d71_input_id(&state->inputs[index]) : 0;
malidp_write32(reg, BLK_INPUT_ID0 + index * 4, input_hw_id);
}
for_each_changed_input(state, index)
malidp_write32(reg, BLK_INPUT_ID0 + index * 4,
to_d71_input_id(state, index));
malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
}
@ -644,9 +1159,16 @@ int d71_probe_block(struct d71_dev *d71,
err = d71_compiz_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_SPLITTER:
case D71_BLK_TYPE_CU_SCALER:
err = d71_scaler_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_SPLITTER:
err = d71_splitter_init(d71, blk, reg);
break;
case D71_BLK_TYPE_CU_MERGER:
err = d71_merger_init(d71, blk, reg);
break;
case D71_BLK_TYPE_DOU:
@ -683,3 +1205,7 @@ int d71_probe_block(struct d71_dev *d71,
return err;
}
const struct komeda_pipeline_funcs d71_pipeline_funcs = {
.downscaling_clk_check = d71_downscaling_clk_check,
};


@ -280,7 +280,7 @@ static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
100, 1000, 10000);
return ret > 0 ? 0 : -ETIMEDOUT;
return ret;
}
static void d71_flush(struct komeda_dev *mdev,
@ -304,7 +304,7 @@ static int d71_reset(struct d71_dev *d71)
ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
100, 1000, 10000);
return ret > 0 ? 0 : -ETIMEDOUT;
return ret;
}
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
@ -390,7 +390,7 @@ static int d71_enum_resources(struct komeda_dev *mdev)
for (i = 0; i < d71->num_pipelines; i++) {
pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
NULL);
&d71_pipeline_funcs);
if (IS_ERR(pipe)) {
err = PTR_ERR(pipe);
goto err_cleanup;
@ -447,61 +447,119 @@ err_cleanup:
#define AFB_TH_SC_YTR_BS AFBC(_TILED | _SC | _SPARSE | _YTR | _SPLIT)
static struct komeda_format_caps d71_format_caps_table[] = {
/* HW_ID | fourcc | tile_sz | layer_types | rots | afbc_layouts | afbc_features */
/* HW_ID | fourcc | layer_types | rots | afbc_layouts | afbc_features */
/* ABGR_2101010*/
{__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 0), DRM_FORMAT_ARGB2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 1), DRM_FORMAT_ABGR2101010, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(0, 2), DRM_FORMAT_RGBA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(0, 3), DRM_FORMAT_BGRA1010102, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* ABGR_8888*/
{__HW_ID(1, 0), DRM_FORMAT_ARGB8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(1, 2), DRM_FORMAT_RGBA8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 3), DRM_FORMAT_BGRA8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 0), DRM_FORMAT_ARGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 1), DRM_FORMAT_ABGR8888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(1, 2), DRM_FORMAT_RGBA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(1, 3), DRM_FORMAT_BGRA8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* XBGB_8888 */
{__HW_ID(2, 0), DRM_FORMAT_XRGB8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 1), DRM_FORMAT_XBGR8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 2), DRM_FORMAT_RGBX8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 3), DRM_FORMAT_BGRX8888, 1, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 0), DRM_FORMAT_XRGB8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 1), DRM_FORMAT_XBGR8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 2), DRM_FORMAT_RGBX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
{__HW_ID(2, 3), DRM_FORMAT_BGRX8888, RICH_SIMPLE_WB, Flip_H_V, 0, 0},
/* BGR_888 */ /* non-afbc RGB888 doesn't support rotation and flip */
{__HW_ID(3, 0), DRM_FORMAT_RGB888, 1, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, 1, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
{__HW_ID(3, 0), DRM_FORMAT_RGB888, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE_WB, Rot_0, 0, 0},
{__HW_ID(3, 1), DRM_FORMAT_BGR888, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR_BS}, /* afbc */
/* BGR 16bpp */
{__HW_ID(4, 0), DRM_FORMAT_RGBA5551, 1, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, 1, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 2), DRM_FORMAT_RGB565, 1, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, 1, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, 1, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 4), DRM_FORMAT_R8, 1, SIMPLE, Rot_0, 0, 0},
{__HW_ID(4, 0), DRM_FORMAT_RGBA5551, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 1), DRM_FORMAT_ABGR1555, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 2), DRM_FORMAT_RGB565, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Flip_H_V, 0, 0},
{__HW_ID(4, 3), DRM_FORMAT_BGR565, RICH_SIMPLE, Rot_ALL_H_V, LYT_NM_WB, AFB_TH_SC_YTR}, /* afbc */
{__HW_ID(4, 4), DRM_FORMAT_R8, SIMPLE, Rot_0, 0, 0},
/* YUV 444/422/420 8bit */
{__HW_ID(5, 0), 0 /*XYUV8888*/, 1, 0, 0, 0, 0},
/* XYUV unsupported*/
{__HW_ID(5, 1), DRM_FORMAT_YUYV, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 2), DRM_FORMAT_YUYV, 1, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 3), DRM_FORMAT_UYVY, 1, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 4), 0, /*X0L0 */ 2, 0, 0, 0}, /* Y0L0 unsupported */
{__HW_ID(5, 6), DRM_FORMAT_NV12, 1, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 6), 0/*DRM_FORMAT_YUV420_8BIT*/, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 7), DRM_FORMAT_YUV420, 1, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0},
/* YUV 10bit*/
{__HW_ID(6, 0), 0,/*XVYU2101010*/ 1, 0, 0, 0, 0},/* VYV30 unsupported */
{__HW_ID(6, 6), 0/*DRM_FORMAT_X0L2*/, 2, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), 0/*DRM_FORMAT_P010*/, 1, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), 0/*DRM_FORMAT_YUV420_10BIT*/, 1, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
{__HW_ID(6, 6), DRM_FORMAT_X0L2, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), DRM_FORMAT_P010, RICH, Flip_H_V, 0, 0},
{__HW_ID(6, 7), DRM_FORMAT_YUV420_10BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH},
};
static bool d71_format_mod_supported(const struct komeda_format_caps *caps,
u32 layer_type, u64 modifier, u32 rot)
{
uint64_t layout = modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK;
if ((layout == AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) &&
drm_rotation_90_or_270(rot)) {
DRM_DEBUG_ATOMIC("D71 doesn't support ROT90 for WB-AFBC.\n");
return false;
}
return true;
}
static void d71_init_fmt_tbl(struct komeda_dev *mdev)
{
struct komeda_format_caps_table *table = &mdev->fmt_tbl;
table->format_caps = d71_format_caps_table;
table->format_mod_supported = d71_format_mod_supported;
table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}
static int d71_connect_iommu(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
u32 __iomem *reg = d71->gcu_addr;
u32 check_bits = (d71->num_pipelines == 2) ?
GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
int i, ret;
if (!d71->integrates_tbu)
return -1;
malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_CONNECT_MODE);
ret = dp_wait_cond(has_bits(check_bits, malidp_read32(reg, BLK_STATUS)),
100, 1000, 1000);
if (ret < 0) {
DRM_ERROR("timed out connecting to TCU!\n");
malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
return ret;
}
for (i = 0; i < d71->num_pipelines; i++)
malidp_write32_mask(d71->pipes[i]->lpu_addr, LPU_TBU_CONTROL,
LPU_TBU_CTRL_TLBPEN, LPU_TBU_CTRL_TLBPEN);
return 0;
}
static int d71_disconnect_iommu(struct komeda_dev *mdev)
{
struct d71_dev *d71 = mdev->chip_data;
u32 __iomem *reg = d71->gcu_addr;
u32 check_bits = (d71->num_pipelines == 2) ?
GCU_STATUS_TCS0 | GCU_STATUS_TCS1 : GCU_STATUS_TCS0;
int ret;
malidp_write32_mask(reg, BLK_CONTROL, 0x7, TBU_DISCONNECT_MODE);
ret = dp_wait_cond(((malidp_read32(reg, BLK_STATUS) & check_bits) == 0),
100, 1000, 1000);
if (ret < 0) {
DRM_ERROR("timed out disconnecting from TCU!\n");
malidp_write32_mask(reg, BLK_CONTROL, 0x7, INACTIVE_MODE);
}
return ret;
}
static const struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
@ -512,6 +570,8 @@ static const struct komeda_dev_funcs d71_chip_funcs = {
.on_off_vblank = d71_on_off_vblank,
.change_opmode = d71_change_opmode,
.flush = d71_flush,
.connect_iommu = d71_connect_iommu,
.disconnect_iommu = d71_disconnect_iommu,
};
const struct komeda_dev_funcs *


@ -43,6 +43,8 @@ struct d71_dev {
#define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base)
extern const struct komeda_pipeline_funcs d71_pipeline_funcs;
int d71_probe_block(struct d71_dev *d71,
struct block_header *blk, u32 __iomem *reg);
void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);


@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include "komeda_color_mgmt.h"
/* 10bit precision YUV2RGB matrix */
static const s32 yuv2rgb_bt601_narrow[KOMEDA_N_YUV2RGB_COEFFS] = {
1192, 0, 1634,
1192, -401, -832,
1192, 2066, 0,
64, 512, 512
};
static const s32 yuv2rgb_bt601_wide[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1436,
1024, -352, -731,
1024, 1815, 0,
0, 512, 512
};
static const s32 yuv2rgb_bt709_narrow[KOMEDA_N_YUV2RGB_COEFFS] = {
1192, 0, 1836,
1192, -218, -546,
1192, 2163, 0,
64, 512, 512
};
static const s32 yuv2rgb_bt709_wide[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1613,
1024, -192, -479,
1024, 1900, 0,
0, 512, 512
};
static const s32 yuv2rgb_bt2020[KOMEDA_N_YUV2RGB_COEFFS] = {
1024, 0, 1476,
1024, -165, -572,
1024, 1884, 0,
0, 512, 512
};
const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range)
{
bool narrow = color_range == DRM_COLOR_YCBCR_LIMITED_RANGE;
const s32 *coeffs;
switch (color_encoding) {
case DRM_COLOR_YCBCR_BT709:
coeffs = narrow ? yuv2rgb_bt709_narrow : yuv2rgb_bt709_wide;
break;
case DRM_COLOR_YCBCR_BT601:
coeffs = narrow ? yuv2rgb_bt601_narrow : yuv2rgb_bt601_wide;
break;
case DRM_COLOR_YCBCR_BT2020:
coeffs = yuv2rgb_bt2020;
break;
default:
coeffs = NULL;
break;
}
return coeffs;
}
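The coefficients use a 1024 = 1.0 fixed-point scale, with what appear to be the 10-bit input offsets in the last row; for instance the BT.601 narrow-range luma gain is 1192/1024 ≈ 1.164, the familiar limited-range expansion factor. A standalone sketch (not driver code) printing the BT.601 narrow matrix as floating point for checking:

#include <stdio.h>

int main(void)
{
	/* copy of yuv2rgb_bt601_narrow above; scale is 1024 = 1.0 */
	static const int m[12] = {
		1192,    0,  1634,
		1192, -401,  -832,
		1192, 2066,     0,
		  64,  512,   512
	};
	int i;

	for (i = 0; i < 9; i += 3)
		printf("%7.3f %7.3f %7.3f\n",
		       m[i] / 1024.0, m[i + 1] / 1024.0, m[i + 2] / 1024.0);
	printf("offsets: %d %d %d\n", m[9], m[10], m[11]);
	return 0;
}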


@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* (C) COPYRIGHT 2019 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#ifndef _KOMEDA_COLOR_MGMT_H_
#define _KOMEDA_COLOR_MGMT_H_
#include <drm/drm_color_mgmt.h>
#define KOMEDA_N_YUV2RGB_COEFFS 12
const s32 *komeda_select_yuv2rgb_coeffs(u32 color_encoding, u32 color_range);
#endif


@ -18,6 +18,21 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
{
u64 pxlclk, aclk;
if (!kcrtc_st->base.active) {
kcrtc_st->clock_ratio = 0;
return;
}
pxlclk = kcrtc_st->base.adjusted_mode.clock * 1000;
aclk = komeda_calc_aclk(kcrtc_st);
kcrtc_st->clock_ratio = div64_u64(aclk << 32, pxlclk);
}
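clock_ratio here is aclk divided by pxlclk stored as 32.32 fixed point (div64_u64(aclk << 32, pxlclk)), and is exposed through the CLOCK_RATIO crtc property added further down in this file. A small sketch of how a hypothetical reader of that property could turn the value back into a plain ratio:

#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: convert the 32.32 fixed-point CLOCK_RATIO value
 * read from the property back into a floating-point aclk/pxlclk ratio.
 */
static double clock_ratio_to_double(uint64_t clock_ratio)
{
	return (double)clock_ratio / (double)(1ULL << 32);
}

int main(void)
{
	/* e.g. aclk = 400 MHz, pxlclk = 148.5 MHz -> ratio ~= 2.694 */
	uint64_t ratio = ((uint64_t)400000000 << 32) / 148500000;

	printf("aclk/pxlclk = %.3f\n", clock_ratio_to_double(ratio));
	return 0;
}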
/**
* komeda_crtc_atomic_check - build display output data flow
* @crtc: DRM crtc
@ -38,6 +53,9 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
int err;
if (drm_atomic_crtc_needs_modeset(state))
komeda_crtc_update_clock_ratio(kcrtc_st);
if (state->active) {
err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
if (err)
@ -45,6 +63,10 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
}
/* release unclaimed pipeline resources */
err = komeda_release_unclaimed_resources(kcrtc->slave, kcrtc_st);
if (err)
return err;
err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st);
if (err)
return err;
@ -52,11 +74,12 @@ komeda_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
static u32 komeda_calc_mclk(struct komeda_crtc_state *kcrtc_st)
unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st)
{
unsigned long mclk = kcrtc_st->base.adjusted_mode.clock * 1000;
struct komeda_dev *mdev = kcrtc_st->base.crtc->dev->dev_private;
unsigned long pxlclk = kcrtc_st->base.adjusted_mode.clock;
return mclk;
return clk_round_rate(mdev->aclk, pxlclk * 1000);
}
/* To activate a crtc, two parts of preparation are mainly needed
@ -89,23 +112,20 @@ komeda_crtc_prepare(struct komeda_crtc *kcrtc)
}
mdev->dpmode = new_mode;
/* Only need to enable mclk on single display mode, but no need to
* enable mclk it on dual display mode, since the dual mode always
* switch from single display mode, the mclk already enabled, no need
/* Only need to enable the aclk in single display mode; there is no need
to enable it in dual display mode, since dual mode always switches
from single display mode, where the aclk is already enabled, so no need
* to enable it again.
*/
if (new_mode != KOMEDA_MODE_DUAL_DISP) {
err = clk_set_rate(mdev->mclk, komeda_calc_mclk(kcrtc_st));
err = clk_set_rate(mdev->aclk, komeda_calc_aclk(kcrtc_st));
if (err)
DRM_ERROR("failed to set mclk.\n");
err = clk_prepare_enable(mdev->mclk);
DRM_ERROR("failed to set aclk.\n");
err = clk_prepare_enable(mdev->aclk);
if (err)
DRM_ERROR("failed to enable mclk.\n");
DRM_ERROR("failed to enable aclk.\n");
}
err = clk_prepare_enable(master->aclk);
if (err)
DRM_ERROR("failed to enable axi clk for pipe%d.\n", master->id);
err = clk_set_rate(master->pxlclk, pxlclk_rate);
if (err)
DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
@ -146,9 +166,8 @@ komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
mdev->dpmode = new_mode;
clk_disable_unprepare(master->pxlclk);
clk_disable_unprepare(master->aclk);
if (new_mode == KOMEDA_MODE_INACTIVE)
clk_disable_unprepare(mdev->mclk);
clk_disable_unprepare(mdev->aclk);
unlock:
mutex_unlock(&mdev->lock);
@ -165,6 +184,15 @@ void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
if (events & KOMEDA_EVENT_VSYNC)
drm_crtc_handle_vblank(crtc);
if (events & KOMEDA_EVENT_EOW) {
struct komeda_wb_connector *wb_conn = kcrtc->wb_conn;
if (wb_conn)
drm_writeback_signal_completion(&wb_conn->base, 0);
else
DRM_WARN("CRTC[%d]: EOW happen but no wb_connector.\n",
drm_crtc_index(&kcrtc->base));
}
/* will handle it together with the write back support */
if (events & KOMEDA_EVENT_EOW)
DRM_DEBUG("EOW.\n");
@ -201,6 +229,9 @@ komeda_crtc_do_flush(struct drm_crtc *crtc,
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state);
struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct komeda_wb_connector *wb_conn = kcrtc->wb_conn;
struct drm_connector_state *conn_st;
DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n",
drm_crtc_index(crtc),
@ -210,6 +241,13 @@ komeda_crtc_do_flush(struct drm_crtc *crtc,
if (has_bit(master->id, kcrtc_st->affected_pipes))
komeda_pipeline_update(master, old->state);
if (slave && has_bit(slave->id, kcrtc_st->affected_pipes))
komeda_pipeline_update(slave, old->state);
conn_st = wb_conn ? wb_conn->base.base.state : NULL;
if (conn_st && conn_st->writeback_job)
drm_writeback_queue_job(&wb_conn->base, conn_st);
/* step 2: notify the HW to kickoff the update */
mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
}
@ -231,6 +269,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_dev *mdev = crtc->dev->dev_private;
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct completion *disable_done = &crtc->state->commit->flip_done;
struct completion temp;
int timeout;
@ -239,6 +278,9 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_index(crtc),
old_st->active_pipes, old_st->affected_pipes);
if (slave && has_bit(slave->id, old_st->active_pipes))
komeda_pipeline_disable(slave, old->state);
if (has_bit(master->id, old_st->active_pipes))
komeda_pipeline_disable(master, old->state);
@ -311,7 +353,6 @@ komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
if (m->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
/* main clock/AXI clk must be faster than pxlclk*/
mode_clk = m->clock * 1000;
pxlclk = clk_round_rate(master->pxlclk, mode_clk);
if (pxlclk != mode_clk) {
@ -320,15 +361,9 @@ komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
return MODE_NOCLOCK;
}
if (clk_round_rate(mdev->mclk, mode_clk) < pxlclk) {
DRM_DEBUG_ATOMIC("mclk can't satisfy the requirement of %s-clk: %ld.\n",
m->name, pxlclk);
return MODE_CLOCK_HIGH;
}
if (clk_round_rate(master->aclk, mode_clk) < pxlclk) {
DRM_DEBUG_ATOMIC("aclk can't satisfy the requirement of %s-clk: %ld.\n",
/* main engine clock must be faster than pxlclk*/
if (clk_round_rate(mdev->aclk, mode_clk) < pxlclk) {
DRM_DEBUG_ATOMIC("engine clk can't satisfy the requirement of %s-clk: %ld.\n",
m->name, pxlclk);
return MODE_CLOCK_HIGH;
@ -389,6 +424,8 @@ komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
new->affected_pipes = old->active_pipes;
new->clock_ratio = old->clock_ratio;
new->max_slave_zorder = old->max_slave_zorder;
return &new->base;
}
@ -417,6 +454,24 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
}
static int
komeda_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property, uint64_t *val)
{
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
if (property == kcrtc->clock_ratio_property) {
*val = kcrtc_st->clock_ratio;
} else {
DRM_DEBUG_DRIVER("Unknown property %s\n", property->name);
return -EINVAL;
}
return 0;
}
static const struct drm_crtc_funcs komeda_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
@ -427,6 +482,7 @@ static const struct drm_crtc_funcs komeda_crtc_funcs = {
.atomic_destroy_state = komeda_crtc_atomic_destroy_state,
.enable_vblank = komeda_crtc_vblank_enable,
.disable_vblank = komeda_crtc_vblank_disable,
.atomic_get_property = komeda_crtc_atomic_get_property,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
@ -444,7 +500,7 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
master = mdev->pipelines[i];
crtc->master = master;
crtc->slave = NULL;
crtc->slave = komeda_pipeline_get_slave(master);
if (crtc->slave)
sprintf(str, "pipe-%d", crtc->slave->id);
@ -462,6 +518,42 @@ int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
return 0;
}
static int komeda_crtc_create_clock_ratio_property(struct komeda_crtc *kcrtc)
{
struct drm_crtc *crtc = &kcrtc->base;
struct drm_property *prop;
prop = drm_property_create_range(crtc->dev, DRM_MODE_PROP_ATOMIC,
"CLOCK_RATIO", 0, U64_MAX);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&crtc->base, prop, 0);
kcrtc->clock_ratio_property = prop;
return 0;
}
static int komeda_crtc_create_slave_planes_property(struct komeda_crtc *kcrtc)
{
struct drm_crtc *crtc = &kcrtc->base;
struct drm_property *prop;
if (kcrtc->slave_planes == 0)
return 0;
prop = drm_property_create_range(crtc->dev, DRM_MODE_PROP_IMMUTABLE,
"slave_planes", 0, U32_MAX);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&crtc->base, prop, kcrtc->slave_planes);
kcrtc->slave_planes_property = prop;
return 0;
}
static struct drm_plane *
get_crtc_primary(struct komeda_kms_dev *kms, struct komeda_crtc *crtc)
{
@ -498,7 +590,15 @@ static int komeda_crtc_add(struct komeda_kms_dev *kms,
crtc->port = kcrtc->master->of_output_port;
return 0;
err = komeda_crtc_create_clock_ratio_property(kcrtc);
if (err)
return err;
err = komeda_crtc_create_slave_planes_property(kcrtc);
if (err)
return err;
return err;
}
int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev)


@ -5,6 +5,7 @@
*
*/
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@ -52,9 +53,6 @@ static void komeda_debugfs_init(struct komeda_dev *mdev)
return;
mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
if (IS_ERR_OR_NULL(mdev->debugfs_root))
return;
debugfs_create_file("register", 0444, mdev->debugfs_root,
mdev, &komeda_register_fops);
}
@ -115,13 +113,6 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
pipe = mdev->pipelines[pipe_id];
clk = of_clk_get_by_name(np, "aclk");
if (IS_ERR(clk)) {
DRM_ERROR("get aclk for pipeline %d failed!\n", pipe_id);
return PTR_ERR(clk);
}
pipe->aclk = clk;
clk = of_clk_get_by_name(np, "pxclk");
if (IS_ERR(clk)) {
DRM_ERROR("get pxclk for pipeline %d failed!\n", pipe_id);
@ -144,14 +135,8 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
struct clk *clk;
int ret;
clk = devm_clk_get(dev, "mclk");
if (IS_ERR(clk))
return PTR_ERR(clk);
mdev->mclk = clk;
mdev->irq = platform_get_irq(pdev, 0);
if (mdev->irq < 0) {
DRM_ERROR("could not get IRQ number.\n");
@ -205,16 +190,15 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
mdev->pclk = devm_clk_get(dev, "pclk");
if (IS_ERR(mdev->pclk)) {
DRM_ERROR("Get APB clk failed.\n");
err = PTR_ERR(mdev->pclk);
mdev->pclk = NULL;
mdev->aclk = devm_clk_get(dev, "aclk");
if (IS_ERR(mdev->aclk)) {
DRM_ERROR("Get engine clk failed.\n");
err = PTR_ERR(mdev->aclk);
mdev->aclk = NULL;
goto err_cleanup;
}
/* Enable APB clock to access the registers */
clk_prepare_enable(mdev->pclk);
clk_prepare_enable(mdev->aclk);
mdev->funcs = product->identify(mdev->reg_base, &mdev->chip);
if (!komeda_product_match(mdev, product->product_id)) {
@ -253,6 +237,18 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
dev->dma_parms = &mdev->dma_parms;
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
if (!mdev->iommu)
DRM_INFO("continue without IOMMU support!\n");
if (mdev->iommu && mdev->funcs->connect_iommu) {
err = mdev->funcs->connect_iommu(mdev);
if (err) {
mdev->iommu = NULL;
goto err_cleanup;
}
}
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@ -282,6 +278,10 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
debugfs_remove_recursive(mdev->debugfs_root);
#endif
if (mdev->iommu && mdev->funcs->disconnect_iommu)
mdev->funcs->disconnect_iommu(mdev);
mdev->iommu = NULL;
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
@ -297,15 +297,10 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
mdev->reg_base = NULL;
}
if (mdev->mclk) {
devm_clk_put(dev, mdev->mclk);
mdev->mclk = NULL;
}
if (mdev->pclk) {
clk_disable_unprepare(mdev->pclk);
devm_clk_put(dev, mdev->pclk);
mdev->pclk = NULL;
if (mdev->aclk) {
clk_disable_unprepare(mdev->aclk);
devm_clk_put(dev, mdev->aclk);
mdev->aclk = NULL;
}
devm_kfree(dev, mdev);


@ -92,6 +92,10 @@ struct komeda_dev_funcs {
int (*enum_resources)(struct komeda_dev *mdev);
/** @cleanup: call to chip to cleanup komeda_dev->chip data */
void (*cleanup)(struct komeda_dev *mdev);
/** @connect_iommu: Optional, connect to external iommu */
int (*connect_iommu)(struct komeda_dev *mdev);
/** @disconnect_iommu: Optional, disconnect to external iommu */
int (*disconnect_iommu)(struct komeda_dev *mdev);
/**
* @irq_handler:
*
@ -156,10 +160,8 @@ struct komeda_dev {
struct komeda_chip_info chip;
/** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
struct komeda_format_caps_table fmt_tbl;
/** @pclk: APB clock for register access */
struct clk *pclk;
/** @mclk: HW main engine clk */
struct clk *mclk;
/** @aclk: HW main engine clk */
struct clk *aclk;
/** @irq: irq number */
int irq;
@ -184,6 +186,9 @@ struct komeda_dev {
*/
void *chip_data;
/** @iommu: iommu domain */
struct iommu_domain *iommu;
/** @debugfs_root: root directory of komeda debugfs */
struct dentry *debugfs_root;
};


@ -35,6 +35,64 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
return NULL;
}
/* Two assumptions
* 1. RGB always has YTR
* 2. Tiled RGB always has SC
*/
u64 komeda_supported_modifiers[] = {
/* AFBC_16x16 + features: YUV+RGB both */
AFBC_16x16(0),
/* SPARSE */
AFBC_16x16(_SPARSE),
/* YTR + (SPARSE) */
AFBC_16x16(_YTR | _SPARSE),
AFBC_16x16(_YTR),
/* SPLIT + SPARSE + YTR RGB only */
/* split mode is only allowed for sparse mode */
AFBC_16x16(_SPLIT | _SPARSE | _YTR),
/* TILED + (SPARSE) */
/* TILED YUV format only */
AFBC_16x16(_TILED | _SPARSE),
AFBC_16x16(_TILED),
/* TILED + SC + (SPLIT+SPARSE | SPARSE) + (YTR) */
AFBC_16x16(_TILED | _SC | _SPLIT | _SPARSE | _YTR),
AFBC_16x16(_TILED | _SC | _SPARSE | _YTR),
AFBC_16x16(_TILED | _SC | _YTR),
/* AFBC_32x8 + features: which are RGB formats only */
/* YTR + (SPARSE) */
AFBC_32x8(_YTR | _SPARSE),
AFBC_32x8(_YTR),
/* SPLIT + SPARSE + (YTR) */
/* split mode is only allowed for sparse mode */
AFBC_32x8(_SPLIT | _SPARSE | _YTR),
/* TILED + SC + (SPLIT+SPARSE | SPARSE) + YTR */
AFBC_32x8(_TILED | _SC | _SPLIT | _SPARSE | _YTR),
AFBC_32x8(_TILED | _SC | _SPARSE | _YTR),
AFBC_32x8(_TILED | _SC | _YTR),
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
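The AFBC_16x16()/AFBC_32x8() helpers above presumably wrap the canonical DRM AFBC modifier macros; a short sketch (assuming libdrm's drm_fourcc.h header) of what the AFBC_16x16(_YTR | _SPARSE) entry corresponds to on the userspace side:

#include <stdint.h>
#include <drm_fourcc.h>	/* libdrm uapi header */

/* the 16x16 superblock layout with YTR and sparse allocation enabled */
static const uint64_t afbc_16x16_ytr_sparse =
	DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
				AFBC_FORMAT_MOD_YTR |
				AFBC_FORMAT_MOD_SPARSE);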
bool komeda_format_mod_supported(struct komeda_format_caps_table *table,
u32 layer_type, u32 fourcc, u64 modifier,
u32 rot)
{
const struct komeda_format_caps *caps;
caps = komeda_get_format_caps(table, fourcc, modifier);
if (!caps)
return false;
if (!(caps->supported_layer_types & layer_type))
return false;
if (table->format_mod_supported)
return table->format_mod_supported(caps, layer_type, modifier,
rot);
return true;
}
u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
u32 layer_type, u32 *n_fmts)
{


@ -50,7 +50,6 @@
*
* @hw_id: hw format id, hw specific value.
* @fourcc: drm fourcc format.
* @tile_size: format tiled size, used by ARM format X0L0/X0L2
* @supported_layer_types: indicate which layer supports this format
* @supported_rots: allowed rotations for this format
* @supported_afbc_layouts: supported afbc layouts
@ -59,7 +58,6 @@
struct komeda_format_caps {
u32 hw_id;
u32 fourcc;
u32 tile_size;
u32 supported_layer_types;
u32 supported_rots;
u32 supported_afbc_layouts;
@ -71,12 +69,30 @@ struct komeda_format_caps {
*
* @n_formats: the size of format_caps list.
* @format_caps: format_caps list.
* @format_mod_supported: Optional. Some HW may have special requirements or
* limitations which cannot be described by format_caps; this function gives
* the HW the ability to do further HW-specific checks.
*/
struct komeda_format_caps_table {
u32 n_formats;
const struct komeda_format_caps *format_caps;
bool (*format_mod_supported)(const struct komeda_format_caps *caps,
u32 layer_type, u64 modifier, u32 rot);
};
extern u64 komeda_supported_modifiers[];
static inline const char *komeda_get_format_name(u32 fourcc, u64 modifier)
{
struct drm_format_name_buf buf;
static char name[64];
snprintf(name, sizeof(name), "%s with modifier: 0x%llx.",
drm_get_format_name(fourcc, &buf), modifier);
return name;
}
const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);
@ -86,4 +102,8 @@ u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
void komeda_put_fourcc_list(u32 *fourcc_list);
bool komeda_format_mod_supported(struct komeda_format_caps_table *table,
u32 layer_type, u32 fourcc, u64 modifier,
u32 rot);
#endif


@ -36,52 +36,112 @@ static const struct drm_framebuffer_funcs komeda_fb_funcs = {
.create_handle = komeda_fb_create_handle,
};
static int
komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
u64 min_size;
obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
if (!obj) {
DRM_DEBUG_KMS("Failed to lookup GEM object\n");
return -ENOENT;
}
switch (fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
alignment_w = 32;
alignment_h = 8;
break;
case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
alignment_w = 16;
alignment_h = 16;
break;
default:
WARN(1, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
break;
}
/* tiled header afbc */
if (fb->modifier & AFBC_FORMAT_MOD_TILED) {
alignment_w *= AFBC_TH_LAYOUT_ALIGNMENT;
alignment_h *= AFBC_TH_LAYOUT_ALIGNMENT;
alignment_header = AFBC_TH_BODY_START_ALIGNMENT;
} else {
alignment_header = AFBC_BODY_START_ALIGNMENT;
}
kfb->aligned_w = ALIGN(fb->width, alignment_w);
kfb->aligned_h = ALIGN(fb->height, alignment_h);
if (fb->offsets[0] % alignment_header) {
DRM_DEBUG_KMS("afbc offset alignment check failed.\n");
goto check_failed;
}
n_blocks = (kfb->aligned_w * kfb->aligned_h) / AFBC_SUPERBLK_PIXELS;
kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
alignment_header);
kfb->afbc_size = kfb->offset_payload + n_blocks *
ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
AFBC_SUPERBLK_ALIGNMENT);
min_size = kfb->afbc_size + fb->offsets[0];
if (min_size > obj->size) {
DRM_DEBUG_KMS("afbc size check failed, obj_size: 0x%zx. min_size 0x%llx.\n",
obj->size, min_size);
goto check_failed;
}
fb->obj[0] = obj;
return 0;
check_failed:
drm_gem_object_put_unlocked(obj);
return -EINVAL;
}
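A standalone sketch (not driver code) walking the same arithmetic as komeda_fb_afbc_size_check() for an assumed 1920x1080 ARGB8888 buffer with plain (non-tiled-header) 16x16 AFBC, using the usual AFBC constants as assumptions: 16-byte header entries, 256 pixels per superblock, 1024-byte body-start alignment and 128-byte superblock payload alignment:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	/* assumed constants, matching the usual AFBC definitions */
	const uint32_t hdr_size = 16, superblk_pixels = 256;
	const uint32_t body_align = 1024, superblk_align = 128;
	const uint32_t cpp = 4;				/* ARGB8888 */

	uint32_t aligned_w = ALIGN_UP(1920, 16);	/* 1920 */
	uint32_t aligned_h = ALIGN_UP(1080, 16);	/* 1088 */

	uint32_t n_blocks = aligned_w * aligned_h / superblk_pixels;	/* 8160 */
	uint32_t offset_payload = ALIGN_UP(n_blocks * hdr_size, body_align);
	uint32_t afbc_size = offset_payload +
			     n_blocks * ALIGN_UP(cpp * superblk_pixels, superblk_align);

	/* prints: payload offset 131072, total afbc size 8486912 */
	printf("payload offset %u, total afbc size %u\n", offset_payload, afbc_size);
	return 0;
}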
static int
komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
struct drm_gem_object *obj;
u32 min_size = 0;
u32 i;
u32 i, block_h;
u64 min_size;
for (i = 0; i < fb->format->num_planes; i++) {
if (komeda_fb_check_src_coords(kfb, 0, 0, fb->width, fb->height))
return -EINVAL;
for (i = 0; i < info->num_planes; i++) {
obj = drm_gem_object_lookup(file, mode_cmd->handles[i]);
if (!obj) {
DRM_DEBUG_KMS("Failed to lookup GEM object\n");
fb->obj[i] = NULL;
return -ENOENT;
}
fb->obj[i] = obj;
kfb->aligned_w = fb->width / (i ? fb->format->hsub : 1);
kfb->aligned_h = fb->height / (i ? fb->format->vsub : 1);
if (fb->pitches[i] % mdev->chip.bus_width) {
block_h = drm_format_info_block_height(info, i);
if ((fb->pitches[i] * block_h) % mdev->chip.bus_width) {
DRM_DEBUG_KMS("Pitch[%d]: 0x%x doesn't align to 0x%x\n",
i, fb->pitches[i], mdev->chip.bus_width);
drm_gem_object_put_unlocked(obj);
fb->obj[i] = NULL;
return -EINVAL;
}
min_size = ((kfb->aligned_h / kfb->format_caps->tile_size - 1)
* fb->pitches[i])
+ (kfb->aligned_w * fb->format->cpp[i]
* kfb->format_caps->tile_size)
+ fb->offsets[i];
min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- to_drm_gem_cma_obj(obj)->paddr;
if (obj->size < min_size) {
DRM_DEBUG_KMS("Fail to check none afbc fb size.\n");
drm_gem_object_put_unlocked(obj);
fb->obj[i] = NULL;
DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
i, obj->size, min_size);
return -EINVAL;
}
fb->obj[i] = obj;
}
if (fb->format->num_planes == 3) {
@ -118,7 +178,10 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd);
ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd);
if (kfb->base.modifier)
ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd);
else
ret = komeda_fb_none_afbc_size_check(mdev, kfb, file, mode_cmd);
if (ret < 0)
goto err_cleanup;
@ -129,6 +192,8 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
goto err_cleanup;
}
kfb->is_va = mdev->iommu ? true : false;
return &kfb->base;
err_cleanup:
@ -139,12 +204,42 @@ err_cleanup:
return ERR_PTR(ret);
}
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h)
{
const struct drm_framebuffer *fb = &kfb->base;
const struct drm_format_info *info = fb->format;
u32 block_w = drm_format_info_block_width(fb->format, 0);
u32 block_h = drm_format_info_block_height(fb->format, 0);
if ((src_x + src_w > fb->width) || (src_y + src_h > fb->height)) {
DRM_DEBUG_ATOMIC("Invalid source coordinate.\n");
return -EINVAL;
}
if ((src_x % info->hsub) || (src_w % info->hsub) ||
(src_y % info->vsub) || (src_h % info->vsub)) {
DRM_DEBUG_ATOMIC("Wrong subsampling dimension x:%d, y:%d, w:%d, h:%d for format: %x.\n",
src_x, src_y, src_w, src_h, info->format);
return -EINVAL;
}
if ((src_x % block_w) || (src_w % block_w) ||
(src_y % block_h) || (src_h % block_h)) {
DRM_DEBUG_ATOMIC("x:%d, y:%d, w:%d, h:%d should be multiple of block_w/h for format: %x.\n",
src_x, src_y, src_w, src_h, info->format);
return -EINVAL;
}
return 0;
}
dma_addr_t
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
{
struct drm_framebuffer *fb = &kfb->base;
const struct drm_gem_cma_object *obj;
u32 plane_x, plane_y, cpp, pitch, offset;
u32 offset, plane_x, plane_y, block_w, block_sz;
if (plane >= fb->format->num_planes) {
DRM_DEBUG_KMS("Out of max plane num.\n");
@ -155,13 +250,33 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
offset = fb->offsets[plane];
if (!fb->modifier) {
block_w = drm_format_info_block_width(fb->format, plane);
block_sz = fb->format->char_per_block[plane];
plane_x = x / (plane ? fb->format->hsub : 1);
plane_y = y / (plane ? fb->format->vsub : 1);
cpp = fb->format->cpp[plane];
pitch = fb->pitches[plane];
offset += plane_x * cpp * kfb->format_caps->tile_size +
(plane_y * pitch) / kfb->format_caps->tile_size;
offset += (plane_x / block_w) * block_sz
+ plane_y * fb->pitches[plane];
}
return obj->paddr + offset;
}
/* if the fb can be supported by a specific layer */
bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type,
u32 rot)
{
struct drm_framebuffer *fb = &kfb->base;
struct komeda_dev *mdev = fb->dev->dev_private;
u32 fourcc = fb->format->format;
u64 modifier = fb->modifier;
bool supported;
supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
fourcc, modifier, rot);
if (!supported)
DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %s.\n",
layer_type, komeda_get_format_name(fourcc, modifier));
return supported;
}


@ -21,19 +21,28 @@ struct komeda_fb {
* extends drm_format_info for komeda specific information
*/
const struct komeda_format_caps *format_caps;
/** @is_va: if smmu is enabled, it will be true */
bool is_va;
/** @aligned_w: aligned frame buffer width */
u32 aligned_w;
/** @aligned_h: aligned frame buffer height */
u32 aligned_h;
/** @afbc_size: minimum size of afbc */
u32 afbc_size;
/** @offset_payload: start of afbc body buffer */
u32 offset_payload;
};
#define to_kfb(dfb) container_of(dfb, struct komeda_fb, base)
struct drm_framebuffer *
komeda_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
const struct drm_mode_fb_cmd2 *mode_cmd);
int komeda_fb_check_src_coords(const struct komeda_fb *kfb,
u32 src_x, u32 src_y, u32 src_w, u32 src_h);
dma_addr_t
komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane);
bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type);
bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type,
u32 rot);
#endif

View File

@ -58,7 +58,6 @@ static struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME | DRIVER_HAVE_IRQ,
.lastclose = drm_fb_helper_lastclose,
.irq_handler = komeda_kms_irq_handler,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
@ -100,6 +99,108 @@ static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
.atomic_commit_tail = komeda_kms_commit_tail,
};
static int komeda_plane_state_list_add(struct drm_plane_state *plane_st,
struct list_head *zorder_list)
{
struct komeda_plane_state *new = to_kplane_st(plane_st);
struct komeda_plane_state *node, *last;
last = list_empty(zorder_list) ?
NULL : list_last_entry(zorder_list, typeof(*last), zlist_node);
/* The list is sorted by increasing zpos, so if the list is empty or the
 * zpos of the new node is bigger than the last node's, there is no need to
 * loop: just insert the new node at the tail of the list.
 */
if (!last || (new->base.zpos > last->base.zpos)) {
list_add_tail(&new->zlist_node, zorder_list);
return 0;
}
/* Build the list in increasing zpos order */
list_for_each_entry(node, zorder_list, zlist_node) {
if (new->base.zpos < node->base.zpos) {
list_add_tail(&new->zlist_node, &node->zlist_node);
break;
} else if (node->base.zpos == new->base.zpos) {
struct drm_plane *a = node->base.plane;
struct drm_plane *b = new->base.plane;
/* Komeda doesn't support setting the same zpos for
 * different planes.
 */
DRM_DEBUG_ATOMIC("PLANE: %s and PLANE: %s are configured same zpos: %d.\n",
a->name, b->name, node->base.zpos);
return -EINVAL;
}
}
return 0;
}
static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_st)
{
struct drm_atomic_state *state = crtc_st->state;
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct komeda_plane_state *kplane_st;
struct drm_plane_state *plane_st;
struct drm_framebuffer *fb;
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
INIT_LIST_HEAD(&zorder_list);
/* This loop also adds all affected planes to the new state */
drm_for_each_plane_mask(plane, crtc->dev, crtc_st->plane_mask) {
plane_st = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_st))
return PTR_ERR(plane_st);
/* Build a list sorted by increasing zpos */
err = komeda_plane_state_list_add(plane_st, &zorder_list);
if (err)
return err;
}
kcrtc_st->max_slave_zorder = 0;
list_for_each_entry(kplane_st, &zorder_list, zlist_node) {
plane_st = &kplane_st->base;
fb = plane_st->fb;
plane = plane_st->plane;
plane_st->normalized_zpos = order++;
/* When layer_split has been enabled, one plane will be handled
 * by two separate komeda layers (left/right), which may need
 * two zorders:
 * - zorder: for the left layer, covering the left display part.
 * - zorder + 1: reserved for the right layer.
 */
if (to_kplane_st(plane_st)->layer_split)
order++;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] zpos:%d, normalized zpos: %d\n",
plane->base.id, plane->name,
plane_st->zpos, plane_st->normalized_zpos);
/* calculate max slave zorder */
if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
kcrtc_st->max_slave_zorder =
max(plane_st->normalized_zpos,
kcrtc_st->max_slave_zorder);
}
crtc_st->zpos_changed = true;
return 0;
}
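/*
 * Illustrative example (not part of the driver): with three planes at
 * zpos 0/1/2 and layer_split enabled on the zpos-0 plane, the loop above
 * assigns that plane normalized_zpos 0 (its right half implicitly takes
 * zorder 1) and gives the remaining planes normalized_zpos 2 and 3.
 */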
static int komeda_kms_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@ -111,7 +212,7 @@ static int komeda_kms_check(struct drm_device *dev,
if (err)
return err;
/* komeda need to re-calculate resource assumption in every commit
/* Komeda needs to re-calculate the resource assignment in every commit,
 * so all affected planes (even unchanged ones) need to be added to
 * drm_atomic_state.
*/
@ -119,6 +220,10 @@ static int komeda_kms_check(struct drm_device *dev,
err = drm_atomic_add_affected_planes(state, crtc);
if (err)
return err;
err = komeda_crtc_normalize_zpos(crtc, new_crtc_st);
if (err)
return err;
}
err = drm_atomic_helper_check_planes(dev, state);
@ -148,7 +253,7 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
config->min_height = 0;
config->max_width = 4096;
config->max_height = 4096;
config->allow_fb_modifiers = false;
config->allow_fb_modifiers = true;
config->funcs = &komeda_mode_config_funcs;
config->helper_private = &komeda_mode_config_helpers;
@ -188,29 +293,36 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
if (err)
goto cleanup_mode_config;
err = komeda_kms_add_wb_connectors(kms, mdev);
if (err)
goto cleanup_mode_config;
err = component_bind_all(mdev->dev, kms);
if (err)
goto cleanup_mode_config;
drm_mode_config_reset(drm);
err = drm_irq_install(drm, mdev->irq);
err = devm_request_irq(drm->dev, mdev->irq,
komeda_kms_irq_handler, IRQF_SHARED,
drm->driver->name, drm);
if (err)
goto cleanup_mode_config;
err = mdev->funcs->enable_irq(mdev);
if (err)
goto uninstall_irq;
goto cleanup_mode_config;
drm->irq_enabled = true;
err = drm_dev_register(drm, 0);
if (err)
goto uninstall_irq;
goto cleanup_mode_config;
return kms;
uninstall_irq:
drm_irq_uninstall(drm);
cleanup_mode_config:
drm->irq_enabled = false;
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
free_kms:
@ -223,9 +335,9 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
drm->irq_enabled = false;
mdev->funcs->disable_irq(mdev);
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
component_unbind_all(mdev->dev, drm);
komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);

View File

@ -7,11 +7,13 @@
#ifndef _KOMEDA_KMS_H_
#define _KOMEDA_KMS_H_
#include <linux/list.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <drm/drm_print.h>
#include <video/videomode.h>
#include <video/display_timing.h>
@ -31,6 +33,11 @@ struct komeda_plane {
* Layers with same capabilities.
*/
struct komeda_layer *layer;
/** @prop_img_enhancement: for on/off image enhancement */
struct drm_property *prop_img_enhancement;
/** @prop_layer_split: for on/off layer_split */
struct drm_property *prop_layer_split;
};
/**
@ -42,8 +49,14 @@ struct komeda_plane {
struct komeda_plane_state {
/** @base: &drm_plane_state */
struct drm_plane_state base;
/** @zlist_node: zorder list node */
struct list_head zlist_node;
/* private properties */
/* @img_enhancement: on/off image enhancement
* @layer_split: on/off layer_split
*/
u8 img_enhancement : 1,
layer_split : 1;
};
/**
@ -73,8 +86,20 @@ struct komeda_crtc {
*/
struct komeda_pipeline *slave;
/** @slave_planes: komeda slave planes mask */
u32 slave_planes;
/** @wb_conn: komeda write back connector */
struct komeda_wb_connector *wb_conn;
/** @disable_done: this flip_done is for tracking the disable */
struct completion *disable_done;
/** @clock_ratio_property: property for ratio of (aclk << 32)/pxlclk */
struct drm_property *clock_ratio_property;
/** @slave_planes_property: property for slaves of the planes */
struct drm_property *slave_planes_property;
};
/**
@ -97,6 +122,12 @@ struct komeda_crtc_state {
* the active pipelines in once display instance
*/
u32 active_pipes;
/** @clock_ratio: ratio of (aclk << 32)/pxlclk */
u64 clock_ratio;
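/*
 * Illustrative arithmetic only: with aclk = 400 MHz and pxlclk = 148.5 MHz,
 * the stored U32.32 fixed-point value is (400000000ULL << 32) / 148500000,
 * i.e. roughly 2.69 * 2^32.
 */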
/** @max_slave_zorder: the maximum of slave zorder */
u32 max_slave_zorder;
};
/** struct komeda_kms_dev - for gathering KMS-related things */
@ -116,6 +147,42 @@ struct komeda_kms_dev {
#define to_kcrtc(p) container_of(p, struct komeda_crtc, base)
#define to_kcrtc_st(p) container_of(p, struct komeda_crtc_state, base)
#define to_kdev(p) container_of(p, struct komeda_kms_dev, base)
#define to_wb_conn(x) container_of(x, struct drm_writeback_connector, base)
static inline bool is_writeback_only(struct drm_crtc_state *st)
{
struct komeda_wb_connector *wb_conn = to_kcrtc(st->crtc)->wb_conn;
struct drm_connector *conn = wb_conn ? &wb_conn->base.base : NULL;
return conn && (st->connector_mask == BIT(drm_connector_index(conn)));
}
static inline bool
is_only_changed_connector(struct drm_crtc_state *st, struct drm_connector *conn)
{
struct drm_crtc_state *old_st;
u32 changed_connectors;
old_st = drm_atomic_get_old_crtc_state(st->state, st->crtc);
changed_connectors = st->connector_mask ^ old_st->connector_mask;
return BIT(drm_connector_index(conn)) == changed_connectors;
}
static inline bool has_flip_h(u32 rot)
{
u32 rotation = drm_rotation_simplify(rot,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_90 |
DRM_MODE_REFLECT_MASK);
if (rotation & DRM_MODE_ROTATE_90)
return !!(rotation & DRM_MODE_REFLECT_Y);
else
return !!(rotation & DRM_MODE_REFLECT_X);
}
unsigned long komeda_calc_aclk(struct komeda_crtc_state *kcrtc_st);
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
@ -123,6 +190,8 @@ int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev);
int komeda_kms_add_wb_connectors(struct komeda_kms_dev *kms,
struct komeda_dev *mdev);
void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,

View File

@ -53,7 +53,6 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
}
clk_put(pipe->pxlclk);
clk_put(pipe->aclk);
of_node_put(pipe->of_output_dev);
of_node_put(pipe->of_output_port);
@ -92,6 +91,12 @@ komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
case KOMEDA_COMPONENT_SCALER1:
pos = to_cpos(pipe->scalers[id - KOMEDA_COMPONENT_SCALER0]);
break;
case KOMEDA_COMPONENT_SPLITTER:
pos = to_cpos(pipe->splitter);
break;
case KOMEDA_COMPONENT_MERGER:
pos = to_cpos(pipe->merger);
break;
case KOMEDA_COMPONENT_IPS0:
case KOMEDA_COMPONENT_IPS1:
temp = mdev->pipelines[id - KOMEDA_COMPONENT_IPS0];
@ -126,6 +131,28 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
return c;
}
struct komeda_component *
komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask)
{
struct komeda_component *c = NULL;
int id;
id = find_first_bit((unsigned long *)&comp_mask, 32);
if (id < 32)
c = komeda_pipeline_get_component(pipe, id);
return c;
}
static struct komeda_component *
komeda_component_pickup_input(struct komeda_component *c, u32 avail_comps)
{
u32 avail_inputs = c->supported_inputs & (avail_comps);
return komeda_pipeline_get_first_component(c->pipeline, avail_inputs);
}
/** komeda_component_add - Add a component to &komeda_pipeline */
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
@ -249,16 +276,49 @@ static void komeda_component_verify_inputs(struct komeda_component *c)
}
}
static struct komeda_layer *
komeda_get_layer_split_right_layer(struct komeda_pipeline *pipe,
struct komeda_layer *left)
{
int index = left->base.id - KOMEDA_COMPONENT_LAYER0;
int i;
for (i = index + 1; i < pipe->n_layers; i++)
if (left->layer_type == pipe->layers[i]->layer_type)
return pipe->layers[i];
return NULL;
}
static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
{
struct komeda_component *c;
int id;
struct komeda_layer *layer;
int i, id;
dp_for_each_set_bit(id, pipe->avail_comps) {
c = komeda_pipeline_get_component(pipe, id);
komeda_component_verify_inputs(c);
}
/* calculate right layer for the layer split */
for (i = 0; i < pipe->n_layers; i++) {
layer = pipe->layers[i];
layer->right = komeda_get_layer_split_right_layer(pipe, layer);
}
}
/* If pipeline_A accepts another pipeline_B's component as an input, treat
 * pipeline_B as a slave of pipeline_A.
*/
struct komeda_pipeline *
komeda_pipeline_get_slave(struct komeda_pipeline *master)
{
struct komeda_component *slave;
slave = komeda_component_pickup_input(&master->compiz->base,
KOMEDA_PIPELINE_COMPIZS);
return slave ? slave->pipeline : NULL;
}
int komeda_assemble_pipelines(struct komeda_dev *mdev)

View File

@ -228,6 +228,12 @@ struct komeda_layer {
struct malidp_range hsize_in, vsize_in;
u32 layer_type; /* RICH, SIMPLE or WB */
u32 supported_rots;
/* Komeda supports layer split, which splits a whole image into two parts
 * (left and right) and handles them with two individual layer processors.
 * Note: left/right are always relative to the final display rect,
 * not the source buffer.
*/
struct komeda_layer *right;
};
struct komeda_layer_state {
@ -235,16 +241,34 @@ struct komeda_layer_state {
/* layer specific configuration state */
u16 hsize, vsize;
u32 rot;
u16 afbc_crop_l;
u16 afbc_crop_r;
u16 afbc_crop_t;
u16 afbc_crop_b;
dma_addr_t addr[3];
};
struct komeda_scaler {
struct komeda_component base;
/* scaler features and caps */
struct malidp_range hsize, vsize;
u32 max_upscaling;
u32 max_downscaling;
u8 scaling_split_overlap; /* split overlap for scaling */
u8 enh_split_overlap; /* split overlap for image enhancement */
};
struct komeda_scaler_state {
struct komeda_component_state base;
u16 hsize_in, vsize_in;
u16 hsize_out, vsize_out;
u16 total_hsize_in, total_vsize_in;
u16 total_hsize_out; /* total_xxxx are size before split */
u16 left_crop, right_crop;
u8 en_scaling : 1,
en_alpha : 1, /* enable alpha processing */
en_img_enhancement : 1,
en_split : 1,
right_part : 1; /* right part of split image */
};
struct komeda_compiz {
@ -265,6 +289,29 @@ struct komeda_compiz_state {
struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS];
};
struct komeda_merger {
struct komeda_component base;
struct malidp_range hsize_merged;
struct malidp_range vsize_merged;
};
struct komeda_merger_state {
struct komeda_component_state base;
u16 hsize_merged;
u16 vsize_merged;
};
struct komeda_splitter {
struct komeda_component base;
struct malidp_range hsize, vsize;
};
struct komeda_splitter_state {
struct komeda_component_state base;
u16 hsize, vsize;
u16 overlap;
};
struct komeda_improc {
struct komeda_component base;
u32 supported_color_formats; /* DRM_RGB/YUV444/YUV420 */
@ -300,13 +347,27 @@ struct komeda_data_flow_cfg {
struct komeda_component_output input;
u16 in_x, in_y, in_w, in_h;
u32 out_x, out_y, out_w, out_h;
u16 total_in_h, total_in_w;
u16 total_out_w;
u16 left_crop, right_crop, overlap;
u32 rot;
int blending_zorder;
u8 pixel_blend_mode, layer_alpha;
u8 en_scaling : 1,
en_img_enhancement : 1,
en_split : 1,
is_yuv : 1,
right_part : 1; /* right part of display image if split enabled */
};
/** struct komeda_pipeline_funcs */
struct komeda_pipeline_funcs {
/* check if the aclk (main engine clock) can satisfy the clock
* requirements of the downscaling specified by dflow
*/
int (*downscaling_clk_check)(struct komeda_pipeline *pipe,
struct drm_display_mode *mode,
unsigned long aclk_rate,
struct komeda_data_flow_cfg *dflow);
/* dump_register: Optional, dump registers to seq_file */
void (*dump_register)(struct komeda_pipeline *pipe,
struct seq_file *sf);
@ -324,8 +385,6 @@ struct komeda_pipeline {
struct komeda_dev *mdev;
/** @pxlclk: pixel clock */
struct clk *pxlclk;
/** @aclk: AXI clock */
struct clk *aclk;
/** @id: pipeline id */
int id;
/** @avail_comps: available components mask of pipeline */
@ -340,6 +399,10 @@ struct komeda_pipeline {
struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
/** @compiz: compositor */
struct komeda_compiz *compiz;
/** @splitter: for splitting the compiz output into two half data flows */
struct komeda_splitter *splitter;
/** @merger: merger */
struct komeda_merger *merger;
/** @wb_layer: writeback layer */
struct komeda_layer *wb_layer;
/** @improc: post image processor */
@ -382,17 +445,21 @@ struct komeda_pipeline_state {
#define to_layer(c) container_of(c, struct komeda_layer, base)
#define to_compiz(c) container_of(c, struct komeda_compiz, base)
#define to_scaler(c) container_of(c, struct komeda_scaler, base)
#define to_splitter(c) container_of(c, struct komeda_splitter, base)
#define to_merger(c) container_of(c, struct komeda_merger, base)
#define to_improc(c) container_of(c, struct komeda_improc, base)
#define to_ctrlr(c) container_of(c, struct komeda_timing_ctrlr, base)
#define to_layer_st(c) container_of(c, struct komeda_layer_state, base)
#define to_compiz_st(c) container_of(c, struct komeda_compiz_state, base)
#define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base)
#define to_scaler_st(c) container_of(c, struct komeda_scaler_state, base)
#define to_splitter_st(c) container_of(c, struct komeda_splitter_state, base)
#define to_merger_st(c) container_of(c, struct komeda_merger_state, base)
#define to_improc_st(c) container_of(c, struct komeda_improc_state, base)
#define to_ctrlr_st(c) container_of(c, struct komeda_timing_ctrlr_state, base)
#define priv_to_comp_st(o) container_of(o, struct komeda_component_state, obj)
#define priv_to_pipe_st(o) container_of(o, struct komeda_pipeline_state, obj)
#define priv_to_pipe_st(o) container_of(o, struct komeda_pipeline_state, obj)
/* pipeline APIs */
struct komeda_pipeline *
@ -400,9 +467,14 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
const struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
struct komeda_pipeline *
komeda_pipeline_get_slave(struct komeda_pipeline *master);
int komeda_assemble_pipelines(struct komeda_dev *mdev);
struct komeda_component *
komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id);
struct komeda_component *
komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask);
void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct seq_file *sf);
@ -419,17 +491,41 @@ komeda_component_add(struct komeda_pipeline *pipe,
void komeda_component_destroy(struct komeda_dev *mdev,
struct komeda_component *c);
static inline struct komeda_component *
komeda_component_pickup_output(struct komeda_component *c, u32 avail_comps)
{
u32 avail_inputs = c->supported_outputs & (avail_comps);
return komeda_pipeline_get_first_component(c->pipeline, avail_inputs);
}
struct komeda_plane_state;
struct komeda_crtc_state;
struct komeda_crtc;
void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
u16 *hsize, u16 *vsize);
int komeda_build_layer_data_flow(struct komeda_layer *layer,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow);
int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow);
int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
struct komeda_crtc_state *kcrtc_st);
int komeda_build_layer_split_data_flow(struct komeda_layer *left,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow);
int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow);
int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
struct komeda_crtc_state *kcrtc_st);
@ -441,4 +537,7 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe,
void komeda_pipeline_update(struct komeda_pipeline *pipe,
struct drm_atomic_state *old_state);
void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
struct drm_framebuffer *fb);
#endif /* _KOMEDA_PIPELINE_H_*/

View File

@ -211,13 +211,14 @@ komeda_component_check_input(struct komeda_component_state *state,
struct komeda_component *c = state->component;
if ((idx < 0) || (idx >= c->max_active_inputs)) {
DRM_DEBUG_ATOMIC("%s invalid input id: %d.\n", c->name, idx);
DRM_DEBUG_ATOMIC("%s required an invalid %s-input[%d].\n",
input->component->name, c->name, idx);
return -EINVAL;
}
if (has_bit(idx, state->active_inputs)) {
DRM_DEBUG_ATOMIC("%s required input_id: %d has been occupied already.\n",
c->name, idx);
DRM_DEBUG_ATOMIC("%s required %s-input[%d] has been occupied already.\n",
input->component->name, c->name, idx);
return -EINVAL;
}
@ -249,18 +250,67 @@ komeda_component_validate_private(struct komeda_component *c,
return err;
}
/* Get current available scaler from the component->supported_outputs */
static struct komeda_scaler *
komeda_component_get_avail_scaler(struct komeda_component *c,
struct drm_atomic_state *state)
{
struct komeda_pipeline_state *pipe_st;
u32 avail_scalers;
pipe_st = komeda_pipeline_get_state(c->pipeline, state);
if (!pipe_st)
return NULL;
avail_scalers = (pipe_st->active_comps & KOMEDA_PIPELINE_SCALERS) ^
KOMEDA_PIPELINE_SCALERS;
c = komeda_component_pickup_output(c, avail_scalers);
return to_scaler(c);
}
static void
komeda_rotate_data_flow(struct komeda_data_flow_cfg *dflow, u32 rot)
{
if (drm_rotation_90_or_270(rot)) {
swap(dflow->in_h, dflow->in_w);
swap(dflow->total_in_h, dflow->total_in_w);
}
}
static int
komeda_layer_check_cfg(struct komeda_layer *layer,
struct komeda_plane_state *kplane_st,
struct komeda_fb *kfb,
struct komeda_data_flow_cfg *dflow)
{
if (!in_range(&layer->hsize_in, dflow->in_w)) {
DRM_DEBUG_ATOMIC("src_w: %d is out of range.\n", dflow->in_w);
u32 src_x, src_y, src_w, src_h;
if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot))
return -EINVAL;
if (layer->base.id == KOMEDA_COMPONENT_WB_LAYER) {
src_x = dflow->out_x;
src_y = dflow->out_y;
src_w = dflow->out_w;
src_h = dflow->out_h;
} else {
src_x = dflow->in_x;
src_y = dflow->in_y;
src_w = dflow->in_w;
src_h = dflow->in_h;
}
if (komeda_fb_check_src_coords(kfb, src_x, src_y, src_w, src_h))
return -EINVAL;
if (!in_range(&layer->hsize_in, src_w)) {
DRM_DEBUG_ATOMIC("invalidate src_w %d.\n", src_w);
return -EINVAL;
}
if (!in_range(&layer->vsize_in, dflow->in_h)) {
DRM_DEBUG_ATOMIC("src_h: %d is out of range.\n", dflow->in_h);
if (!in_range(&layer->vsize_in, src_h)) {
DRM_DEBUG_ATOMIC("invalidate src_h %d.\n", src_h);
return -EINVAL;
}
@ -279,7 +329,7 @@ komeda_layer_validate(struct komeda_layer *layer,
struct komeda_layer_state *st;
int i, err;
err = komeda_layer_check_cfg(layer, kplane_st, dflow);
err = komeda_layer_check_cfg(layer, kfb, dflow);
if (err)
return err;
@ -291,8 +341,22 @@ komeda_layer_validate(struct komeda_layer *layer,
st = to_layer_st(c_st);
st->rot = dflow->rot;
st->hsize = kfb->aligned_w;
st->vsize = kfb->aligned_h;
if (fb->modifier) {
st->hsize = kfb->aligned_w;
st->vsize = kfb->aligned_h;
st->afbc_crop_l = dflow->in_x;
st->afbc_crop_r = kfb->aligned_w - dflow->in_x - dflow->in_w;
st->afbc_crop_t = dflow->in_y;
st->afbc_crop_b = kfb->aligned_h - dflow->in_y - dflow->in_h;
} else {
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
st->afbc_crop_l = 0;
st->afbc_crop_r = 0;
st->afbc_crop_t = 0;
st->afbc_crop_b = 0;
}
for (i = 0; i < fb->format->num_planes; i++)
st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
@ -305,11 +369,275 @@ komeda_layer_validate(struct komeda_layer *layer,
/* update the data flow for the next stage */
komeda_component_set_output(&dflow->input, &layer->base, 0);
/*
* The rotation has been handled by the layer, so adjust the data flow for
* the next stage.
*/
komeda_rotate_data_flow(dflow, st->rot);
return 0;
}
static void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
u16 *hsize, u16 *vsize)
static int
komeda_wb_layer_validate(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_fb *kfb = to_kfb(conn_st->writeback_job->fb);
struct komeda_component_state *c_st;
struct komeda_layer_state *st;
int i, err;
err = komeda_layer_check_cfg(wb_layer, kfb, dflow);
if (err)
return err;
c_st = komeda_component_get_state_and_set_user(&wb_layer->base,
conn_st->state, conn_st->connector, conn_st->crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_layer_st(c_st);
st->hsize = dflow->out_w;
st->vsize = dflow->out_h;
for (i = 0; i < kfb->base.format->num_planes; i++)
st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->out_x,
dflow->out_y, i);
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &wb_layer->base, 0);
return 0;
}
static bool scaling_ratio_valid(u32 size_in, u32 size_out,
u32 max_upscaling, u32 max_downscaling)
{
if (size_out > size_in * max_upscaling)
return false;
else if (size_in > size_out * max_downscaling)
return false;
return true;
}
static int
komeda_scaler_check_cfg(struct komeda_scaler *scaler,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
u32 hsize_in, vsize_in, hsize_out, vsize_out;
u32 max_upscaling;
hsize_in = dflow->in_w;
vsize_in = dflow->in_h;
hsize_out = dflow->out_w;
vsize_out = dflow->out_h;
if (!in_range(&scaler->hsize, hsize_in) ||
!in_range(&scaler->hsize, hsize_out)) {
DRM_DEBUG_ATOMIC("Invalid horizontal sizes");
return -EINVAL;
}
if (!in_range(&scaler->vsize, vsize_in) ||
!in_range(&scaler->vsize, vsize_out)) {
DRM_DEBUG_ATOMIC("Invalid vertical sizes");
return -EINVAL;
}
/* If the input comes from compiz, the scaling is for writeback, and the
 * scaler can not do upscaling for writeback
*/
if (has_bit(dflow->input.component->id, KOMEDA_PIPELINE_COMPIZS))
max_upscaling = 1;
else
max_upscaling = scaler->max_upscaling;
if (!scaling_ratio_valid(hsize_in, hsize_out, max_upscaling,
scaler->max_downscaling)) {
DRM_DEBUG_ATOMIC("Invalid horizontal scaling ratio");
return -EINVAL;
}
if (!scaling_ratio_valid(vsize_in, vsize_out, max_upscaling,
scaler->max_downscaling)) {
DRM_DEBUG_ATOMIC("Invalid vertical scaling ratio");
return -EINVAL;
}
if (hsize_in > hsize_out || vsize_in > vsize_out) {
struct komeda_pipeline *pipe = scaler->base.pipeline;
int err;
err = pipe->funcs->downscaling_clk_check(pipe,
&kcrtc_st->base.adjusted_mode,
komeda_calc_aclk(kcrtc_st), dflow);
if (err) {
DRM_DEBUG_ATOMIC("aclk can't satisfy the clock requirement of the downscaling\n");
return err;
}
}
return 0;
}
static int
komeda_scaler_validate(void *user,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_atomic_state *drm_st = kcrtc_st->base.state;
struct komeda_component_state *c_st;
struct komeda_scaler_state *st;
struct komeda_scaler *scaler;
int err = 0;
if (!(dflow->en_scaling || dflow->en_img_enhancement))
return 0;
scaler = komeda_component_get_avail_scaler(dflow->input.component,
drm_st);
if (!scaler) {
DRM_DEBUG_ATOMIC("No scaler available");
return -EINVAL;
}
err = komeda_scaler_check_cfg(scaler, kcrtc_st, dflow);
if (err)
return err;
c_st = komeda_component_get_state_and_set_user(&scaler->base,
drm_st, user, kcrtc_st->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_scaler_st(c_st);
st->hsize_in = dflow->in_w;
st->vsize_in = dflow->in_h;
st->hsize_out = dflow->out_w;
st->vsize_out = dflow->out_h;
st->right_crop = dflow->right_crop;
st->left_crop = dflow->left_crop;
st->total_vsize_in = dflow->total_in_h;
st->total_hsize_in = dflow->total_in_w;
st->total_hsize_out = dflow->total_out_w;
/* Enable alpha processing if the next stage needs the pixel alpha */
st->en_alpha = dflow->pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE;
st->en_scaling = dflow->en_scaling;
st->en_img_enhancement = dflow->en_img_enhancement;
st->en_split = dflow->en_split;
st->right_part = dflow->right_part;
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&dflow->input, &scaler->base, 0);
return err;
}
static void komeda_split_data_flow(struct komeda_scaler *scaler,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_dflow,
struct komeda_data_flow_cfg *r_dflow);
static int
komeda_splitter_validate(struct komeda_splitter *splitter,
struct drm_connector_state *conn_st,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_output,
struct komeda_data_flow_cfg *r_output)
{
struct komeda_component_state *c_st;
struct komeda_splitter_state *st;
if (!splitter) {
DRM_DEBUG_ATOMIC("Current HW doesn't support splitter.\n");
return -EINVAL;
}
if (!in_range(&splitter->hsize, dflow->in_w)) {
DRM_DEBUG_ATOMIC("split in_w:%d is out of the acceptable range.\n",
dflow->in_w);
return -EINVAL;
}
if (!in_range(&splitter->vsize, dflow->in_h)) {
DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
dflow->in_w);
return -EINVAL;
}
c_st = komeda_component_get_state_and_set_user(&splitter->base,
conn_st->state, conn_st->connector, conn_st->crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
komeda_split_data_flow(splitter->base.pipeline->scalers[0],
dflow, l_output, r_output);
st = to_splitter_st(c_st);
st->hsize = dflow->in_w;
st->vsize = dflow->in_h;
st->overlap = dflow->overlap;
komeda_component_add_input(&st->base, &dflow->input, 0);
komeda_component_set_output(&l_output->input, &splitter->base, 0);
komeda_component_set_output(&r_output->input, &splitter->base, 1);
return 0;
}
static int
komeda_merger_validate(struct komeda_merger *merger,
void *user,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *left_input,
struct komeda_data_flow_cfg *right_input,
struct komeda_data_flow_cfg *output)
{
struct komeda_component_state *c_st;
struct komeda_merger_state *st;
int err = 0;
if (!merger) {
DRM_DEBUG_ATOMIC("No merger is available");
return -EINVAL;
}
if (!in_range(&merger->hsize_merged, output->out_w)) {
DRM_DEBUG_ATOMIC("merged_w: %d is out of the accepted range.\n",
output->out_w);
return -EINVAL;
}
if (!in_range(&merger->vsize_merged, output->out_h)) {
DRM_DEBUG_ATOMIC("merged_h: %d is out of the accepted range.\n",
output->out_h);
return -EINVAL;
}
c_st = komeda_component_get_state_and_set_user(&merger->base,
kcrtc_st->base.state, kcrtc_st->base.crtc, kcrtc_st->base.crtc);
if (IS_ERR(c_st))
return PTR_ERR(c_st);
st = to_merger_st(c_st);
st->hsize_merged = output->out_w;
st->vsize_merged = output->out_h;
komeda_component_add_input(c_st, &left_input->input, 0);
komeda_component_add_input(c_st, &right_input->input, 1);
komeda_component_set_output(&output->input, &merger->base, 0);
return err;
}
void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
u16 *hsize, u16 *vsize)
{
struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
@ -366,6 +694,7 @@ komeda_compiz_set_input(struct komeda_compiz *compiz,
c_st->changed_active_inputs |= BIT(idx);
komeda_component_add_input(c_st, &dflow->input, idx);
komeda_component_set_output(&dflow->input, &compiz->base, 0);
return 0;
}
@ -455,6 +784,36 @@ komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
return 0;
}
void komeda_complete_data_flow_cfg(struct komeda_data_flow_cfg *dflow,
struct drm_framebuffer *fb)
{
u32 w = dflow->in_w;
u32 h = dflow->in_h;
dflow->total_in_w = dflow->in_w;
dflow->total_in_h = dflow->in_h;
dflow->total_out_w = dflow->out_w;
/* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
if (!fb->format->has_alpha)
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
if (drm_rotation_90_or_270(dflow->rot))
swap(w, h);
dflow->en_scaling = (w != dflow->out_w) || (h != dflow->out_h);
dflow->is_yuv = fb->format->is_yuv;
}
static bool merger_is_available(struct komeda_pipeline *pipe,
struct komeda_data_flow_cfg *dflow)
{
u32 avail_inputs = pipe->merger ?
pipe->merger->base.supported_inputs : 0;
return has_bit(dflow->input.component->id, avail_inputs);
}
int komeda_build_layer_data_flow(struct komeda_layer *layer,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
@ -473,11 +832,290 @@ int komeda_build_layer_data_flow(struct komeda_layer *layer,
if (err)
return err;
err = komeda_scaler_validate(plane, kcrtc_st, dflow);
if (err)
return err;
/* if split, check if the data flow can be put into the merger */
if (dflow->en_split && merger_is_available(pipe, dflow))
return 0;
err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
return err;
}
/*
* Split is introduced to work around the scaler's input/output size
* limitation. The idea is simple: if one scaler can not fit the requirement,
* use two. Split divides the big source image into two half parts
* (left/right) and does the scaling with two scalers separately and
* independently.
* But split also introduces an edge problem in the middle of the image when
* scaling. To avoid it, the split isn't a simple half-and-half, but adds
* extra pixels (overlap) to both sides; after the split the left/right are:
* - left: [0, src_length/2 + overlap]
* - right: [src_length/2 - overlap, src_length]
* The extra overlap does eliminate the edge problem, but it may also generate
* unnecessary pixels when scaling, so we need to crop them before the scaler
* outputs the result to the next stage. How to crop depends on where the
* unneeded pixels are, in other words on which side the overlap was added:
* - left: crop the right
* - right: crop the left
*
* The diagram below shows how the split is done:
*
* <---------------------left->out_w ---------------->
* |--------------------------------|---right_crop-----| <- left after split
* \ \ /
* \ \<--overlap--->/
* |-----------------|-------------|(Middle)------|-----------------| <- src
* /<---overlap--->\ \
* / \ \
* right after split->|-----left_crop---|--------------------------------|
* ^<------------------- right->out_w --------------->^
*
* NOTE: To be consistent with the HW, output_w always contains the crop size.
*/
static void komeda_split_data_flow(struct komeda_scaler *scaler,
struct komeda_data_flow_cfg *dflow,
struct komeda_data_flow_cfg *l_dflow,
struct komeda_data_flow_cfg *r_dflow)
{
bool r90 = drm_rotation_90_or_270(dflow->rot);
bool flip_h = has_flip_h(dflow->rot);
u32 l_out, r_out, overlap;
memcpy(l_dflow, dflow, sizeof(*dflow));
memcpy(r_dflow, dflow, sizeof(*dflow));
l_dflow->right_part = false;
r_dflow->right_part = true;
r_dflow->blending_zorder = dflow->blending_zorder + 1;
overlap = 0;
if (dflow->en_scaling && scaler)
overlap += scaler->scaling_split_overlap;
/* the original dflow may be fed into the splitter, which doesn't need the
 * enhancement overlap
*/
dflow->overlap = overlap;
if (dflow->en_img_enhancement && scaler)
overlap += scaler->enh_split_overlap;
l_dflow->overlap = overlap;
r_dflow->overlap = overlap;
/* split the original content */
/* left/right here always means the left/right part of the display image,
 * not the source image
*/
/* DRM rotation is anti-clockwise */
if (r90) {
if (dflow->en_scaling) {
l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
r_dflow->in_h = l_dflow->in_h;
} else if (dflow->en_img_enhancement) {
/* enhancer only */
l_dflow->in_h = ALIGN(dflow->in_h, 2) / 2 + l_dflow->overlap;
r_dflow->in_h = dflow->in_h / 2 + r_dflow->overlap;
} else {
/* split without scaler, no overlap */
l_dflow->in_h = ALIGN(((dflow->in_h + 1) >> 1), 2);
r_dflow->in_h = dflow->in_h - l_dflow->in_h;
}
/* For YUV formats, the split source w/h may not be aligned to 2 after
 * the split. We have two choices for such a case:
 * 1. scaler is enabled (overlap != 0): align both left/right and crop
 *    the extra data with the scaler.
 * 2. scaler is not enabled: only align the left split src/disp, and
 *    assign the rest to the right.
 */
if ((overlap != 0) && dflow->is_yuv) {
l_dflow->in_h = ALIGN(l_dflow->in_h, 2);
r_dflow->in_h = ALIGN(r_dflow->in_h, 2);
}
if (flip_h)
l_dflow->in_y = dflow->in_y + dflow->in_h - l_dflow->in_h;
else
r_dflow->in_y = dflow->in_y + dflow->in_h - r_dflow->in_h;
} else {
if (dflow->en_scaling) {
l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
r_dflow->in_w = l_dflow->in_w;
} else if (dflow->en_img_enhancement) {
l_dflow->in_w = ALIGN(dflow->in_w, 2) / 2 + l_dflow->overlap;
r_dflow->in_w = dflow->in_w / 2 + r_dflow->overlap;
} else {
l_dflow->in_w = ALIGN(((dflow->in_w + 1) >> 1), 2);
r_dflow->in_w = dflow->in_w - l_dflow->in_w;
}
/* do YUV alignment when scaler enabled */
if ((overlap != 0) && dflow->is_yuv) {
l_dflow->in_w = ALIGN(l_dflow->in_w, 2);
r_dflow->in_w = ALIGN(r_dflow->in_w, 2);
}
/* on flip_h, the left display content comes from the right of the source */
if (flip_h)
l_dflow->in_x = dflow->in_w + dflow->in_x - l_dflow->in_w;
else
r_dflow->in_x = dflow->in_w + dflow->in_x - r_dflow->in_w;
}
/* split the disp_rect */
if (dflow->en_scaling || dflow->en_img_enhancement)
l_dflow->out_w = ((dflow->out_w + 1) >> 1);
else
l_dflow->out_w = ALIGN(((dflow->out_w + 1) >> 1), 2);
r_dflow->out_w = dflow->out_w - l_dflow->out_w;
l_dflow->out_x = dflow->out_x;
r_dflow->out_x = l_dflow->out_w + l_dflow->out_x;
/* calculate the scaling crop */
/* the scalers output more data than needed, which is then cropped */
if (r90) {
l_out = (dflow->out_w * l_dflow->in_h) / dflow->in_h;
r_out = (dflow->out_w * r_dflow->in_h) / dflow->in_h;
} else {
l_out = (dflow->out_w * l_dflow->in_w) / dflow->in_w;
r_out = (dflow->out_w * r_dflow->in_w) / dflow->in_w;
}
l_dflow->left_crop = 0;
l_dflow->right_crop = l_out - l_dflow->out_w;
r_dflow->left_crop = r_out - r_dflow->out_w;
r_dflow->right_crop = 0;
/* out_w includes the crop length */
l_dflow->out_w += l_dflow->right_crop + l_dflow->left_crop;
r_dflow->out_w += r_dflow->right_crop + r_dflow->left_crop;
}
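/*
 * Worked example (illustrative only, with an assumed scaling_split_overlap
 * of 8): splitting a 4096-pixel wide source that is scaled down to a
 * 2048-pixel output, without rotation or flip:
 * - the left flow reads src [0, 2056) (2048 + 8 overlap), the right flow
 *   reads src [2040, 4096);
 * - the display rect is split into 1024 + 1024;
 * - each scaler naturally produces 2048 * 2056 / 4096 = 1028 pixels, so the
 *   left flow gets right_crop = 4, the right flow gets left_crop = 4, and
 *   both report out_w = 1028 (crop included), leaving 2048 visible pixels.
 */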
/* For layer split, a plane state will be split into two data flows and handled
 * by two separate komeda layer input pipelines. Komeda supports two types of
 * layer split:
 * - non-scaling split:
* / layer-left -> \
* plane_state compiz-> ...
* \ layer-right-> /
*
* - scaling split:
* / layer-left -> scaler->\
* plane_state merger -> compiz-> ...
* \ layer-right-> scaler->/
*
* Since the merger only supports scalers as inputs, for a non-scaling split
* the two layer data flows are output to compiz directly, while for a
* scaling split the two data flows are first merged by the merger, which
* then outputs one merged data flow to compiz.
*/
int komeda_build_layer_split_data_flow(struct komeda_layer *left,
struct komeda_plane_state *kplane_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_plane *plane = kplane_st->base.plane;
struct komeda_pipeline *pipe = left->base.pipeline;
struct komeda_layer *right = left->right;
struct komeda_data_flow_cfg l_dflow, r_dflow;
int err;
komeda_split_data_flow(pipe->scalers[0], dflow, &l_dflow, &r_dflow);
DRM_DEBUG_ATOMIC("Assign %s + %s to [PLANE:%d:%s]: "
"src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
left->base.name, right->base.name,
plane->base.id, plane->name,
dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
err = komeda_build_layer_data_flow(left, kplane_st, kcrtc_st, &l_dflow);
if (err)
return err;
err = komeda_build_layer_data_flow(right, kplane_st, kcrtc_st, &r_dflow);
if (err)
return err;
/* The rotation has been handled by the layers, so adjust the data flow */
komeda_rotate_data_flow(dflow, dflow->rot);
/* the left and right dflow have already been merged into compiz,
 * so the merger is not needed to merge them anymore.
*/
if (r_dflow.input.component == l_dflow.input.component)
return 0;
/* line merger path */
err = komeda_merger_validate(pipe->merger, plane, kcrtc_st,
&l_dflow, &r_dflow, dflow);
if (err)
return err;
err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
return err;
}
/* writeback data path: compiz -> scaler -> wb_layer -> memory */
int komeda_build_wb_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct drm_connector *conn = conn_st->connector;
int err;
err = komeda_scaler_validate(conn, kcrtc_st, dflow);
if (err)
return err;
return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}
/* writeback scaling split data path:
* /-> scaler ->\
* compiz -> splitter merger -> wb_layer -> memory
* \-> scaler ->/
*/
int komeda_build_wb_split_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_pipeline *pipe = wb_layer->base.pipeline;
struct drm_connector *conn = conn_st->connector;
struct komeda_data_flow_cfg l_dflow, r_dflow;
int err;
err = komeda_splitter_validate(pipe->splitter, conn_st,
dflow, &l_dflow, &r_dflow);
if (err)
return err;
err = komeda_scaler_validate(conn, kcrtc_st, &l_dflow);
if (err)
return err;
err = komeda_scaler_validate(conn, kcrtc_st, &r_dflow);
if (err)
return err;
err = komeda_merger_validate(pipe->merger, conn_st, kcrtc_st,
&l_dflow, &r_dflow, dflow);
if (err)
return err;
return komeda_wb_layer_validate(wb_layer, conn_st, dflow);
}
/* build display output data flow, the data path is:
* compiz -> improc -> timing_ctrlr
*/
@ -485,10 +1123,25 @@ int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
struct komeda_crtc_state *kcrtc_st)
{
struct komeda_pipeline *master = kcrtc->master;
struct komeda_pipeline *slave = kcrtc->slave;
struct komeda_data_flow_cfg m_dflow; /* master data flow */
struct komeda_data_flow_cfg s_dflow; /* slave data flow */
int err;
memset(&m_dflow, 0, sizeof(m_dflow));
memset(&s_dflow, 0, sizeof(s_dflow));
if (slave && has_bit(slave->id, kcrtc_st->active_pipes)) {
err = komeda_compiz_validate(slave->compiz, kcrtc_st, &s_dflow);
if (err)
return err;
/* merge the slave dflow into master pipeline */
err = komeda_compiz_set_input(master->compiz, kcrtc_st,
&s_dflow);
if (err)
return err;
}
err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
if (err)

View File

@ -10,20 +10,32 @@
#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
#include "komeda_framebuffer.h"
static int
komeda_plane_init_data_flow(struct drm_plane_state *st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_plane *kplane = to_kplane(st->plane);
struct komeda_plane_state *kplane_st = to_kplane_st(st);
struct drm_framebuffer *fb = st->fb;
const struct komeda_format_caps *caps = to_kfb(fb)->format_caps;
struct komeda_pipeline *pipe = kplane->layer->base.pipeline;
memset(dflow, 0, sizeof(*dflow));
dflow->blending_zorder = st->zpos;
dflow->blending_zorder = st->normalized_zpos;
if (pipe == to_kcrtc(st->crtc)->master)
dflow->blending_zorder -= kcrtc_st->max_slave_zorder;
if (dflow->blending_zorder < 0) {
DRM_DEBUG_ATOMIC("%s zorder:%d < max_slave_zorder: %d.\n",
st->plane->name, st->normalized_zpos,
kcrtc_st->max_slave_zorder);
return -EINVAL;
}
/* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
dflow->pixel_blend_mode = fb->format->has_alpha ?
st->pixel_blend_mode : DRM_MODE_BLEND_PIXEL_NONE;
dflow->pixel_blend_mode = st->pixel_blend_mode;
dflow->layer_alpha = st->alpha >> 8;
dflow->out_x = st->crtc_x;
@ -36,6 +48,20 @@ komeda_plane_init_data_flow(struct drm_plane_state *st,
dflow->in_w = st->src_w >> 16;
dflow->in_h = st->src_h >> 16;
dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots);
if (!has_bits(dflow->rot, caps->supported_rots)) {
DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %s.\n",
dflow->rot,
komeda_get_format_name(caps->fourcc,
fb->modifier));
return -EINVAL;
}
dflow->en_img_enhancement = !!kplane_st->img_enhancement;
dflow->en_split = !!kplane_st->layer_split;
komeda_complete_data_flow_cfg(dflow, fb);
return 0;
}
@ -74,11 +100,16 @@ komeda_plane_atomic_check(struct drm_plane *plane,
kcrtc_st = to_kcrtc_st(crtc_st);
err = komeda_plane_init_data_flow(state, &dflow);
err = komeda_plane_init_data_flow(state, kcrtc_st, &dflow);
if (err)
return err;
err = komeda_build_layer_data_flow(layer, kplane_st, kcrtc_st, &dflow);
if (dflow.en_split)
err = komeda_build_layer_split_data_flow(layer,
kplane_st, kcrtc_st, &dflow);
else
err = komeda_build_layer_data_flow(layer,
kplane_st, kcrtc_st, &dflow);
return err;
}
@ -121,6 +152,8 @@ static void komeda_plane_reset(struct drm_plane *plane)
state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
state->base.zpos = kplane->layer->base.id;
state->base.color_encoding = DRM_COLOR_YCBCR_BT601;
state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
plane->state = &state->base;
plane->state->plane = plane;
}
@ -129,7 +162,7 @@ static void komeda_plane_reset(struct drm_plane *plane)
static struct drm_plane_state *
komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
{
struct komeda_plane_state *new;
struct komeda_plane_state *new, *old;
if (WARN_ON(!plane->state))
return NULL;
@ -140,6 +173,10 @@ komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, &new->base);
old = to_kplane_st(plane->state);
new->img_enhancement = old->img_enhancement;
return &new->base;
}
@ -151,6 +188,56 @@ komeda_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(to_kplane_st(state));
}
static int
komeda_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
uint64_t *val)
{
struct komeda_plane *kplane = to_kplane(plane);
struct komeda_plane_state *st = to_kplane_st(state);
if (property == kplane->prop_img_enhancement)
*val = st->img_enhancement;
else if (property == kplane->prop_layer_split)
*val = st->layer_split;
else
return -EINVAL;
return 0;
}
static int
komeda_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
uint64_t val)
{
struct komeda_plane *kplane = to_kplane(plane);
struct komeda_plane_state *st = to_kplane_st(state);
if (property == kplane->prop_img_enhancement)
st->img_enhancement = !!val;
else if (property == kplane->prop_layer_split)
st->layer_split = !!val;
else
return -EINVAL;
return 0;
}
static bool
komeda_plane_format_mod_supported(struct drm_plane *plane,
u32 format, u64 modifier)
{
struct komeda_dev *mdev = plane->dev->dev_private;
struct komeda_plane *kplane = to_kplane(plane);
u32 layer_type = kplane->layer->layer_type;
return komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
format, modifier, 0);
}
static const struct drm_plane_funcs komeda_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@ -158,8 +245,43 @@ static const struct drm_plane_funcs komeda_plane_funcs = {
.reset = komeda_plane_reset,
.atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
.atomic_destroy_state = komeda_plane_atomic_destroy_state,
.atomic_get_property = komeda_plane_atomic_get_property,
.atomic_set_property = komeda_plane_atomic_set_property,
.format_mod_supported = komeda_plane_format_mod_supported,
};
static int
komeda_plane_create_layer_properties(struct komeda_plane *kplane,
struct komeda_layer *layer)
{
struct drm_device *drm = kplane->base.dev;
struct drm_plane *plane = &kplane->base;
struct drm_property *prop = NULL;
/* property: layer image_enhancement */
if (layer->base.supported_outputs & KOMEDA_PIPELINE_SCALERS) {
prop = drm_property_create_bool(drm, DRM_MODE_PROP_ATOMIC,
"img_enhancement");
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, 0);
kplane->prop_img_enhancement = prop;
}
/* property: layer split */
if (layer->right) {
prop = drm_property_create_bool(drm, DRM_MODE_PROP_ATOMIC,
"layer_split");
if (!prop)
return -ENOMEM;
kplane->prop_layer_split = prop;
drm_object_attach_property(&plane->base, prop, 0);
}
return 0;
}
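/*
 * Usage sketch (illustrative only): from userspace these boolean properties
 * are driven through the normal atomic API, e.g. with libdrm, assuming
 * plane_id and the property ids (looked up by name via
 * drmModeObjectGetProperties()/drmModeGetProperty()) are already known:
 *
 *	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
 *
 *	drmModeAtomicAddProperty(req, plane_id, layer_split_prop_id, 1);
 *	drmModeAtomicAddProperty(req, plane_id, img_enhancement_prop_id, 1);
 *	drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
 *	drmModeAtomicFree(req);
 */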
/* for komeda, a pipeline can be shared between crtcs */
static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe)
@ -178,6 +300,22 @@ static u32 get_possible_crtcs(struct komeda_kms_dev *kms,
return possible_crtcs;
}
static void
komeda_set_crtc_plane_mask(struct komeda_kms_dev *kms,
struct komeda_pipeline *pipe,
struct drm_plane *plane)
{
struct komeda_crtc *kcrtc;
int i;
for (i = 0; i < kms->n_crtcs; i++) {
kcrtc = &kms->crtcs[i];
if (pipe == kcrtc->slave)
kcrtc->slave_planes |= BIT(drm_plane_index(plane));
}
}
/* use Layer0 as primary */
static u32 get_plane_type(struct komeda_kms_dev *kms,
struct komeda_component *c)
@ -210,7 +348,7 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
err = drm_universal_plane_init(&kms->base, plane,
get_possible_crtcs(kms, c->pipeline),
&komeda_plane_funcs,
formats, n_formats, NULL,
formats, n_formats, komeda_supported_modifiers,
get_plane_type(kms, c),
"%s", c->name);
@ -221,6 +359,43 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,
drm_plane_helper_add(plane, &komeda_plane_helper_funcs);
err = drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
layer->supported_rots);
if (err)
goto cleanup;
err = drm_plane_create_alpha_property(plane);
if (err)
goto cleanup;
err = drm_plane_create_blend_mode_property(plane,
BIT(DRM_MODE_BLEND_PIXEL_NONE) |
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE));
if (err)
goto cleanup;
err = komeda_plane_create_layer_properties(kplane, layer);
if (err)
goto cleanup;
err = drm_plane_create_color_properties(plane,
BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (err)
goto cleanup;
err = drm_plane_create_zpos_property(plane, layer->base.id, 0, 8);
if (err)
goto cleanup;
komeda_set_crtc_plane_mask(kms, c->pipeline, plane);
return 0;
cleanup:
komeda_plane_destroy(plane);

View File

@ -60,6 +60,49 @@ static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
return 0;
}
static struct drm_private_state *
komeda_scaler_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_scaler_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_scaler_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_scaler_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_scaler_obj_funcs = {
.atomic_duplicate_state = komeda_scaler_atomic_duplicate_state,
.atomic_destroy_state = komeda_scaler_atomic_destroy_state,
};
static int komeda_scaler_obj_add(struct komeda_kms_dev *kms,
struct komeda_scaler *scaler)
{
struct komeda_scaler_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &scaler->base;
drm_atomic_private_obj_init(&kms->base,
&scaler->base.obj, &st->base.obj,
&komeda_scaler_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj)
{
@ -103,6 +146,93 @@ static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
return 0;
}
static struct drm_private_state *
komeda_splitter_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_splitter_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void
komeda_splitter_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_splitter_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_splitter_obj_funcs = {
.atomic_duplicate_state = komeda_splitter_atomic_duplicate_state,
.atomic_destroy_state = komeda_splitter_atomic_destroy_state,
};
static int komeda_splitter_obj_add(struct komeda_kms_dev *kms,
struct komeda_splitter *splitter)
{
struct komeda_splitter_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &splitter->base;
drm_atomic_private_obj_init(&kms->base,
&splitter->base.obj, &st->base.obj,
&komeda_splitter_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_merger_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct komeda_merger_state *st;
st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
if (!st)
return NULL;
komeda_component_state_reset(&st->base);
__drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
return &st->base.obj;
}
static void komeda_merger_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
kfree(to_merger_st(priv_to_comp_st(state)));
}
static const struct drm_private_state_funcs komeda_merger_obj_funcs = {
.atomic_duplicate_state = komeda_merger_atomic_duplicate_state,
.atomic_destroy_state = komeda_merger_atomic_destroy_state,
};
static int komeda_merger_obj_add(struct komeda_kms_dev *kms,
struct komeda_merger *merger)
{
struct komeda_merger_state *st;
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
st->base.component = &merger->base;
drm_atomic_private_obj_init(&kms->base,
&merger->base.obj, &st->base.obj,
&komeda_merger_obj_funcs);
return 0;
}
static struct drm_private_state *
komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj)
{
@ -252,10 +382,34 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
return err;
}
if (pipe->wb_layer) {
err = komeda_layer_obj_add(kms, pipe->wb_layer);
if (err)
return err;
}
for (j = 0; j < pipe->n_scalers; j++) {
err = komeda_scaler_obj_add(kms, pipe->scalers[j]);
if (err)
return err;
}
err = komeda_compiz_obj_add(kms, pipe->compiz);
if (err)
return err;
if (pipe->splitter) {
err = komeda_splitter_obj_add(kms, pipe->splitter);
if (err)
return err;
}
if (pipe->merger) {
err = komeda_merger_obj_add(kms, pipe->merger);
if (err)
return err;
}
err = komeda_improc_obj_add(kms, pipe->improc);
if (err)
return err;

View File

@ -0,0 +1,199 @@
// SPDX-License-Identifier: GPL-2.0
/*
* (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
#include "komeda_dev.h"
#include "komeda_kms.h"
static int
komeda_wb_init_data_flow(struct komeda_layer *wb_layer,
struct drm_connector_state *conn_st,
struct komeda_crtc_state *kcrtc_st,
struct komeda_data_flow_cfg *dflow)
{
struct komeda_scaler *scaler = wb_layer->base.pipeline->scalers[0];
struct drm_framebuffer *fb = conn_st->writeback_job->fb;
memset(dflow, 0, sizeof(*dflow));
dflow->out_w = fb->width;
dflow->out_h = fb->height;
/* the write back data comes from the compiz */
pipeline_composition_size(kcrtc_st, &dflow->in_w, &dflow->in_h);
dflow->input.component = &wb_layer->base.pipeline->compiz->base;
/* compiz doesn't output alpha */
dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
dflow->rot = DRM_MODE_ROTATE_0;
komeda_complete_data_flow_cfg(dflow, fb);
/* if the scaling exceeds the acceptable scaler input/output range, try to
 * enable split.
*/
if (dflow->en_scaling && scaler)
dflow->en_split = !in_range(&scaler->hsize, dflow->in_w) ||
!in_range(&scaler->hsize, dflow->out_w);
return 0;
}
static int
komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_st,
struct drm_connector_state *conn_st)
{
struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_st);
struct drm_writeback_job *writeback_job = conn_st->writeback_job;
struct komeda_layer *wb_layer;
struct komeda_data_flow_cfg dflow;
int err;
if (!writeback_job || !writeback_job->fb) {
return 0;
}
if (!crtc_st->active) {
DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
return -EINVAL;
}
wb_layer = to_kconn(to_wb_conn(conn_st->connector))->wb_layer;
/*
* No need for a full modeset when the only connector changed is the
* writeback connector.
*/
if (crtc_st->connectors_changed &&
is_only_changed_connector(crtc_st, conn_st->connector))
crtc_st->connectors_changed = false;
err = komeda_wb_init_data_flow(wb_layer, conn_st, kcrtc_st, &dflow);
if (err)
return err;
if (dflow.en_split)
err = komeda_build_wb_split_data_flow(wb_layer,
conn_st, kcrtc_st, &dflow);
else
err = komeda_build_wb_data_flow(wb_layer,
conn_st, kcrtc_st, &dflow);
return err;
}
static const struct drm_encoder_helper_funcs komeda_wb_encoder_helper_funcs = {
.atomic_check = komeda_wb_encoder_atomic_check,
};
static int
komeda_wb_connector_get_modes(struct drm_connector *connector)
{
return 0;
}
static enum drm_mode_status
komeda_wb_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
int w = mode->hdisplay, h = mode->vdisplay;
if ((w < mode_config->min_width) || (w > mode_config->max_width))
return MODE_BAD_HVALUE;
if ((h < mode_config->min_height) || (h > mode_config->max_height))
return MODE_BAD_VVALUE;
return MODE_OK;
}
static const struct drm_connector_helper_funcs komeda_wb_conn_helper_funcs = {
.get_modes = komeda_wb_connector_get_modes,
.mode_valid = komeda_wb_connector_mode_valid,
};
static enum drm_connector_status
komeda_wb_connector_detect(struct drm_connector *connector, bool force)
{
return connector_status_connected;
}
static int
komeda_wb_connector_fill_modes(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY)
{
return 0;
}
static void komeda_wb_connector_destroy(struct drm_connector *connector)
{
drm_connector_cleanup(connector);
kfree(to_kconn(to_wb_conn(connector)));
}
static const struct drm_connector_funcs komeda_wb_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = komeda_wb_connector_detect,
.fill_modes = komeda_wb_connector_fill_modes,
.destroy = komeda_wb_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
struct komeda_crtc *kcrtc)
{
struct komeda_dev *mdev = kms->base.dev_private;
struct komeda_wb_connector *kwb_conn;
struct drm_writeback_connector *wb_conn;
u32 *formats, n_formats = 0;
int err;
if (!kcrtc->master->wb_layer)
return 0;
kwb_conn = kzalloc(sizeof(*kwb_conn), GFP_KERNEL);
if (!kwb_conn)
return -ENOMEM;
kwb_conn->wb_layer = kcrtc->master->wb_layer;
wb_conn = &kwb_conn->base;
wb_conn->encoder.possible_crtcs = BIT(drm_crtc_index(&kcrtc->base));
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
&komeda_wb_encoder_helper_funcs,
formats, n_formats);
komeda_put_fourcc_list(formats);
if (err)
return err;
drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
kcrtc->wb_conn = kwb_conn;
return 0;
}
int komeda_kms_add_wb_connectors(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
int i, err;
for (i = 0; i < kms->n_crtcs; i++) {
err = komeda_wb_connector_add(kms, &kms->crtcs[i]);
if (err)
return err;
}
return 0;
}
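/*
 * Usage note (illustrative, based on the generic DRM writeback ABI rather
 * than anything komeda specific): userspace triggers a writeback job by
 * attaching a framebuffer to the connector's "WRITEBACK_FB_ID" property and
 * binding the connector to the CRTC in the same atomic commit; the composed
 * frame then flows compiz -> (scaler/splitter/merger) -> wb_layer -> memory,
 * as validated by komeda_wb_encoder_atomic_check() above.
 */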

View File

@ -549,19 +549,12 @@ static const struct file_operations malidp_debugfs_fops = {
static int malidp_debugfs_init(struct drm_minor *minor)
{
struct malidp_drm *malidp = minor->dev->dev_private;
struct dentry *dentry = NULL;
malidp_error_stats_init(&malidp->de_errors);
malidp_error_stats_init(&malidp->se_errors);
spin_lock_init(&malidp->errors_lock);
dentry = debugfs_create_file("debug",
S_IRUGO | S_IWUSR,
minor->debugfs_root, minor->dev,
&malidp_debugfs_fops);
if (!dentry) {
DRM_ERROR("Cannot create debug file\n");
return -ENOMEM;
}
debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &malidp_debugfs_fops);
return 0;
}