--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -614,14 +614,16 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
return;
/* Disable SP clock before programming HWCG registers */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
+ if (!adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_is_a619_holi(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
for (i = 0; (reg = &adreno_gpu->info->hwcg[i], reg->offset); i++)
gpu_write(gpu, reg->offset, state ? reg->value : 0);
/* Enable SP clock */
- if (!adreno_has_gmu_wrapper(adreno_gpu))
+ if (!adreno_has_gmu_wrapper(adreno_gpu) ||
+ adreno_is_a619_holi(adreno_gpu))
gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
@@ -814,6 +816,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
if (adreno_is_a618(adreno_gpu))
return;
+ if (adreno_is_a619_holi(adreno_gpu))
+ hbb_lo = 0;
+
if (adreno_is_a640_family(adreno_gpu))
amsbc = 1;
@@ -1015,7 +1020,12 @@ static int hw_init(struct msm_gpu *gpu)
}
/* Clear GBIF halt in case GX domain was not collapsed */
- if (a6xx_has_gbif(adreno_gpu)) {
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+ gpu_write(gpu, 0x18, 0);
+ /* Let's make extra sure that the GPU can access the memory.. */
+ mb();
+ } else if (a6xx_has_gbif(adreno_gpu)) {
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
/* Let's make extra sure that the GPU can access the memory.. */
@@ -1024,6 +1034,9 @@ static int hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
/*
* Disable the trusted memory range - we don't actually supported secure
* memory rendering at this point in time and we don't want to block off
@@ -1298,7 +1311,8 @@ static void a6xx_dump(struct msm_gpu *gpu)
#define GBIF_CLIENT_HALT_MASK BIT(0)
#define GBIF_ARB_HALT_MASK BIT(1)
#define VBIF_RESET_ACK_TIMEOUT 100
-#define VBIF_RESET_ACK_MASK 0x00f0
+#define VBIF_RESET_ACK_MASK 0xF0
+#define GPR0_GBIF_HALT_REQUEST 0x1E0
static void a6xx_recover(struct msm_gpu *gpu)
{
@@ -1362,10 +1376,16 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* Software-reset the GPU */
if (adreno_has_gmu_wrapper(adreno_gpu)) {
- /* Halt the GX side of GBIF */
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, GBIF_GX_HALT_MASK);
- spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) &
- GBIF_GX_HALT_MASK);
+ if (adreno_is_a619_holi(adreno_gpu)) {
+ gpu_write(gpu, 0x18, GPR0_GBIF_HALT_REQUEST);
+ spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+ (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+ } else {
+ /* Halt the GX side of GBIF */
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, GBIF_GX_HALT_MASK);
+ spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) &
+ GBIF_GX_HALT_MASK);
+ }
/* Halt new client requests on GBIF */
gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -1380,7 +1400,10 @@ static void a6xx_recover(struct msm_gpu *gpu)
/* Clear the halts */
gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
- gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
+ if (adreno_is_a619_holi(adreno_gpu))
+ gpu_write(gpu, 0x18, 0);
+ else
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
/* This *really* needs to go through before we do anything else! */
mb();
@@ -1786,6 +1809,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
if (ret)
goto err_mem_clk;
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_enable(gmu);
+
/* If anything goes south, tear the GPU down piece by piece.. */
if (ret) {
err_mem_clk:
@@ -1851,6 +1877,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
mutex_lock(&a6xx_gpu->gmu.lock);
+ if (adreno_is_a619_holi(adreno_gpu))
+ a6xx_sptprac_disable(gmu);
+
clk_disable_unprepare(gpu->ebi1_clk);
clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -252,6 +252,11 @@ static inline int adreno_is_a619(struct adreno_gpu *gpu)
return gpu->revn == 619;
}
+static inline int adreno_is_a619_holi(struct adreno_gpu *gpu)
+{
+ return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
+}
+
static inline int adreno_is_a630(struct adreno_gpu *gpu)
{
return gpu->revn == 630;
A619_holi is a GMU-less variant of the already-supported A619 GPU. It's
present on at least SM4350 (holi) and SM6375 (blair). No mesa changes
are required. Add the required kernel-side support for it.

Signed-off-by: Konrad Dybcio <konrad.dybcio@linaro.org>
---
 drivers/gpu/drm/msm/adreno/a6xx_gpu.c   | 47 ++++++++++++++++++++++++++-------
 drivers/gpu/drm/msm/adreno/adreno_gpu.h |  5 ++++
 2 files changed, 43 insertions(+), 9 deletions(-)
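
For readers skimming the hunks, the heart of the change is the new predicate
added to adreno_gpu.h; everything else gates holi-specific register sequences
on it. The snippet below is only a condensed restatement of that hunk, not
additional driver code: an A619 counts as "holi" exactly when it sits behind a
GMU wrapper, i.e. there is no real GMU to manage SPTPRAC power and GBIF halts
on its behalf.

/*
 * Condensed view of the new check, mirroring the adreno_gpu.h hunk above:
 * an A619 probed without a real GMU (wrapper only) takes the holi paths,
 * so the driver itself enables SPTPRAC and drives the GBIF/GPR0 halts.
 */
static inline int adreno_is_a619_holi(struct adreno_gpu *gpu)
{
	return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}

Composing the two existing predicates keeps the change self-contained: no new
feature flag or revision-table entry is needed, and any A619 that probes
without a GMU automatically picks up the holi-specific hw_init, recovery and
suspend/resume handling added above.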