@@ -113,6 +113,7 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 perf_index;
+ u32 bw_index = 0;
unsigned long gpu_freq;
int ret = 0;
@@ -125,6 +126,16 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
if (gpu_freq == gmu->gpu_freqs[perf_index])
break;
+ /* If enabled, find the corresponding DDR bandwidth index */
+ if ((adreno_gpu->info->quirks & ADRENO_QUIRK_GMU_BW_VOTE) && gmu->nr_gpu_bws) {
+ unsigned int bw = dev_pm_opp_get_bandwidth(opp, true, 0);
+
+ for (bw_index = 0; bw_index < gmu->nr_gpu_bws - 1; bw_index++) {
+ if (bw == gmu->gpu_bw_table[bw_index])
+ break;
+ }
+ }
+
gmu->current_perf_index = perf_index;
gmu->freq = gmu->gpu_freqs[perf_index];
@@ -140,8 +151,10 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp,
return;
if (!gmu->legacy) {
- a6xx_hfi_set_freq(gmu, perf_index);
- dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
+ a6xx_hfi_set_freq(gmu, perf_index, bw_index);
+ /* With Bandwidth voting, we now vote for all resources, so skip OPP set */
+ if (!bw_index)
+ dev_pm_opp_set_opp(&gpu->pdev->dev, opp);
return;
}
@@ -205,7 +205,7 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int perf_index, int bw_index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
@@ -752,13 +752,13 @@ static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
sizeof(msg), NULL, 0);
}
-int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int perf_index, int bw_index)
{
struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
msg.ack_type = 1; /* blocking */
- msg.freq = index;
- msg.bw = 0; /* TODO: bus scaling */
+ msg.freq = perf_index;
+ msg.bw = bw_index;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
sizeof(msg), NULL, 0);
The Adreno GPU Management Unit (GMU) can also scale the DDR bandwidth along with the frequency and power domain level. Until now we left it to the OPP core to scale the bandwidth via the interconnect path.

In order to enable bandwidth voting via the GMU, when an OPP is set by devfreq we also look up the corresponding bandwidth index in the previously generated bw_table and pass this value along with the frequency index to the GMU.

Since we now vote for all resources via the GMU, setting the OPP is no longer needed, so we can completely skip calling dev_pm_opp_set_opp() in this situation.

Signed-off-by: Neil Armstrong <neil.armstrong@linaro.org>
---
 drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 17 +++++++++++++++--
 drivers/gpu/drm/msm/adreno/a6xx_gmu.h |  2 +-
 drivers/gpu/drm/msm/adreno/a6xx_hfi.c |  6 +++---
 3 files changed, 19 insertions(+), 6 deletions(-)
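
For reference, below is a minimal userspace sketch of the bandwidth-index lookup that the a6xx_gmu_set_freq() hunk above performs. The table contents, the find_bw_index() helper and the printed values are illustrative assumptions, not kernel code; the real driver reads the OPP bandwidth with dev_pm_opp_get_bandwidth() and scans gmu->gpu_bw_table, which is filled by the previously generated bw_table mentioned above.

#include <stdio.h>

/*
 * Hypothetical stand-in for gmu->gpu_bw_table / gmu->nr_gpu_bws: peak
 * bandwidth values (kB/s), with index 0 meaning "no bandwidth vote".
 * The actual values here are made up for illustration.
 */
static const unsigned int gpu_bw_table[] = { 0, 2188000, 5412000, 8532000 };
#define NR_GPU_BWS (sizeof(gpu_bw_table) / sizeof(gpu_bw_table[0]))

/*
 * Same lookup shape as in a6xx_gmu_set_freq(): return the index whose
 * entry matches @bw exactly, or the last (highest) index when nothing
 * matches, which is what the "bw_index < nr_gpu_bws - 1" bound achieves.
 */
static unsigned int find_bw_index(unsigned int bw)
{
	unsigned int bw_index;

	for (bw_index = 0; bw_index < NR_GPU_BWS - 1; bw_index++) {
		if (bw == gpu_bw_table[bw_index])
			break;
	}

	return bw_index;
}

int main(void)
{
	/* Exact match: prints 2 */
	printf("bw_index for 5412000: %u\n", find_bw_index(5412000));
	/* No exact match: falls back to the highest index, prints 3 */
	printf("bw_index for 1234: %u\n", find_bw_index(1234));
	return 0;
}

Note the deliberate "- 1" loop bound: if the OPP bandwidth does not match any table entry, bw_index ends up pointing at the last (highest) vote instead of running off the end of the table.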