--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -981,11 +981,13 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
 	dev_pm_opp_put(gpu_opp);
 }
 
+#define GMU_ACD_STATE_MSG_LEN 36
 int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 {
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	struct msm_gpu *gpu = &adreno_gpu->base;
 	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+	char buf[GMU_ACD_STATE_MSG_LEN] = {0};
 	int status, ret;
 
 	if (WARN(!gmu->initialized, "The GMU is not set up yet\n"))
@@ -993,6 +995,18 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
 	gmu->hung = false;
 
+	/* Notify AOSS about the ACD state (unimplemented for now => disable it) */
+	if (!IS_ERR(gmu->qmp)) {
+		ret = snprintf(buf, sizeof(buf),
+			       "{class: gpu, res: acd, val: %d}",
+			       0 /* Hardcode ACD to be disabled for now */);
+		WARN_ON(ret >= GMU_ACD_STATE_MSG_LEN);
+
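+		/* qmp_send() wants a length that is a multiple of 4 */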
+		ret = qmp_send(gmu->qmp, buf, sizeof(buf));
+		if (ret)
+			dev_err(gmu->dev, "failed to send GPU ACD state\n");
+	}
+
 	/* Turn on the resources */
 	pm_runtime_get_sync(gmu->dev);
@@ -1745,6 +1759,10 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 		goto detach_cxpd;
 	}
 
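+	/* AOSS side channel (QMP) for the ACD state; absence is fatal only on A7xx */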
+	gmu->qmp = qmp_get(gmu->dev);
+	if (IS_ERR(gmu->qmp) && adreno_is_a7xx(adreno_gpu)) {
+		ret = PTR_ERR(gmu->qmp);
+		goto detach_cxpd;
+	}
+
 	init_completion(&gmu->pd_gate);
 	complete_all(&gmu->pd_gate);
 	gmu->pd_nb.notifier_call = cxpd_notifier_cb;
@@ -1768,6 +1786,9 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	return 0;
 
 detach_cxpd:
+	if (!IS_ERR_OR_NULL(gmu->qmp))
+		qmp_put(gmu->qmp);
+
 	dev_pm_domain_detach(gmu->cxpd, false);
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -8,6 +8,7 @@
 #include <linux/iopoll.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
+#include <linux/soc/qcom/qcom_aoss.h>
 #include "msm_drv.h"
 #include "a6xx_hfi.h"
@@ -96,6 +97,8 @@ struct a6xx_gmu {
 	/* For power domain callback */
 	struct notifier_block pd_nb;
 	struct completion pd_gate;
+
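+	/* QMP handle for talking to AOSS (used to set the ACD state) */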
+	struct qmp *qmp;
 };
 
 static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)