drm/msm: Disable the RPTR shadow
[ Upstream commit f6828e0c40 ]
Disable the RPTR shadow across all targets. It will be selectively
re-enabled later for targets that need it.
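
For background: the RPTR shadow is the mechanism by which the CP
publishes its current read pointer into a slot in shared (GPU-accessible)
memory, so the host can poll ring progress without an MMIO register
read. With AXXX_CP_RB_CNTL_NO_UPDATE set the shadow is never written,
and the driver must read CP_RB_RPTR directly. A minimal sketch of the
two modes, using simplified stand-ins rather than the driver's actual
types:

#include <stdint.h>

/* Sketch only: simplified stand-ins, not the msm driver's structures. */

/* Shadow enabled: the CP writes its read pointer into a shared slot
 * and the host simply loads it (cheap, but the slot is GPU-writable). */
static uint32_t rptr_from_shadow(const volatile uint32_t *shadow_slot)
{
	return *shadow_slot;
}

/* Shadow disabled (AXXX_CP_RB_CNTL_NO_UPDATE): the slot is never
 * updated, so the host reads the CP_RB_RPTR register instead. */
static uint32_t rptr_from_register(uint32_t (*mmio_read)(uint32_t offset),
				   uint32_t rb_rptr_offset)
{
	return mmio_read(rb_rptr_offset);
}

This mirrors the get_rptr() change below, which drops the shadow path
unconditionally.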
Cc: stable@vger.kernel.org
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@chromium.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Branch: 5.4-rM2-2.2.x-imx-squashed
Commit: fe79f1cf17 (parent: 8cbe9b7654)
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -164,6 +164,11 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* NOTE: PM4/micro-engine firmware registers look to be the same
 	 * for a2xx and a3xx.. we could possibly push that part down to
 	 * adreno_gpu base class. Or push both PM4 and PFP but
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -215,6 +215,16 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_AXXX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* setup access protection: */
 	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
 
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -265,6 +265,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
+	/*
+	 * Use the default ringbuffer size and block size but disable the RPTR
+	 * shadow
+	 */
+	gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	/* Set the ringbuffer address */
+	gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));
+
 	/* Load PM4: */
 	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
 	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -677,14 +677,21 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	a5xx_preempt_hw_init(gpu);
-
 	a5xx_gpmu_ucode_init(gpu);
 
 	ret = a5xx_ucode_init(gpu);
 	if (ret)
 		return ret;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+	a5xx_preempt_hw_init(gpu);
+
 	/* Disable the interrupts through the initial bringup stage */
 	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
 
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -512,6 +512,13 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	if (ret)
 		goto out;
 
+	/* Set the ringbuffer address */
+	gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
+		gpu->rb[0]->iova);
+
+	gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
 	/* Always come up on rb 0 */
 	a6xx_gpu->cur_ring = gpu->rb[0];
 
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -354,26 +354,6 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->memptrs->rptr = 0;
 	}
 
-	/*
-	 * Setup REG_CP_RB_CNTL. The same value is used across targets (with
-	 * the excpetion of A430 that disables the RPTR shadow) - the cacluation
-	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
-	 * pre-processor to deal with and the A430 variant is ORed in here
-	 */
-	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
-		MSM_GPU_RB_CNTL_DEFAULT |
-		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
-
-	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
-	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
-		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
-
-	if (!adreno_is_a430(adreno_gpu)) {
-		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
-			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-			rbmemptr(gpu->rb[0], rptr));
-	}
-
 	return 0;
 }
 
@@ -381,11 +361,8 @@ int adreno_hw_init(struct msm_gpu *gpu)
 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
 		struct msm_ringbuffer *ring)
 {
-	if (adreno_is_a430(adreno_gpu))
-		return ring->memptrs->rptr = adreno_gpu_read(
-			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
-	else
-		return ring->memptrs->rptr;
+	return ring->memptrs->rptr = adreno_gpu_read(
+		adreno_gpu, REG_ADRENO_CP_RB_RPTR);
 }
 
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
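
With the shadow disabled everywhere, get_rptr() above always performs an
MMIO read of CP_RB_RPTR. A rough, self-contained sketch of how such a
read pointer typically feeds a ring-space check (hypothetical helper
names, not the driver's actual code):

#include <stdint.h>

/* Hypothetical illustration: free dwords in a ring of `size` dwords,
 * given the host write pointer `wptr` and a freshly read `rptr`. The
 * CP consumes entries at rptr while the host fills toward wptr; one
 * slot is kept empty to distinguish a full ring from an empty one. */
static uint32_t ring_free_dwords(uint32_t wptr, uint32_t rptr, uint32_t size)
{
	/* wrap-safe distance: (rptr - wptr - 1) modulo the ring size */
	return (rptr + (size - 1) - wptr) % size;
}

/* A caller would spin until ring_free_dwords(...) >= ndwords before
 * writing new commands into the ring. */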