@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/reset-controller.h>
#include <linux/arm-smccc.h>
+#include <linux/gunyah_rsc_mgr.h>
#include "qcom_scm.h"
@@ -27,6 +28,9 @@ module_param(download_mode, bool, 0);
#define SCM_HAS_IFACE_CLK BIT(1)
#define SCM_HAS_BUS_CLK BIT(2)
+#define QCOM_SCM_RM_MANAGED_VMID 0x3A
+#define QCOM_SCM_MAX_MANAGED_VMID 0x3F
+
struct qcom_scm {
struct device *dev;
struct clk *core_clk;
@@ -1292,6 +1296,126 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
}
EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
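
+/*
+ * Convert a Gunyah RM memory parcel into qcom_scm_assign_mem() calls so the
+ * parcel's pages are assigned to the target VMs before the RM shares them.
+ */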
+static int qcom_scm_gh_rm_pre_mem_share(struct gh_rm_mem_parcel *mem_parcel)
+{
+ struct qcom_scm_vmperm *new_perms;
+ u16 this_vmid;
+ u64 src, src_cpy;
+ int ret, i, n;
+
+ ret = gh_rm_get_vmid(&this_vmid);
+ if (ret)
+ return ret;
+
+ new_perms = kcalloc(mem_parcel->n_acl_entries, sizeof(*new_perms), GFP_KERNEL);
+ if (!new_perms)
+ return -ENOMEM;
+
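+ /*
+  * Build the SCM permission list from the RM ACL entries; VMIDs above
+  * QCOM_SCM_MAX_MANAGED_VMID are represented by QCOM_SCM_RM_MANAGED_VMID.
+  */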
+ for (n = 0; n < mem_parcel->n_acl_entries; n++) {
+ if (mem_parcel->acl_entries[n].vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ new_perms[n].vmid = mem_parcel->acl_entries[n].vmid;
+ else
+ new_perms[n].vmid = QCOM_SCM_RM_MANAGED_VMID;
+ if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_X)
+ new_perms[n].perm |= QCOM_SCM_PERM_EXEC;
+ if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_W)
+ new_perms[n].perm |= QCOM_SCM_PERM_WRITE;
+ if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_R)
+ new_perms[n].perm |= QCOM_SCM_PERM_READ;
+ }
+
+ if (this_vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ src = (1ull << this_vmid);
+ else
+ src = (1ull << QCOM_SCM_RM_MANAGED_VMID);
+
+ for (i = 0; i < mem_parcel->n_mem_entries; i++) {
+ src_cpy = src;
+ ret = qcom_scm_assign_mem(mem_parcel->mem_entries[i].ipa_base,
+ mem_parcel->mem_entries[i].size,
+ &src_cpy, new_perms, mem_parcel->n_acl_entries);
+ if (ret) {
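+ /* Assignment failed: hand the already-assigned entries back to this VM */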
+ src = 0;
+ for (n = 0; n < mem_parcel->n_acl_entries; n++) {
+ if (mem_parcel->acl_entries[n].vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ src |= (1ull << mem_parcel->acl_entries[n].vmid);
+ else
+ src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
+ }
+
+ if (this_vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ new_perms[0].vmid = this_vmid;
+ else
+ new_perms[0].vmid = QCOM_SCM_RM_MANAGED_VMID;
+
+ for (i--; i >= 0; i--) {
+ src_cpy = src;
+ ret = qcom_scm_assign_mem(mem_parcel->mem_entries[i].ipa_base,
+ mem_parcel->mem_entries[i].size,
+ &src_cpy, new_perms, 1);
+ WARN_ON_ONCE(ret);
+ }
+ break;
+ }
+ }
+
+ kfree(new_perms);
+ return ret;
+}
+
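+/*
+ * After the RM reclaims a parcel, reassign its pages back to this VM with
+ * full permissions via qcom_scm_assign_mem().
+ */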
+static int qcom_scm_gh_rm_post_mem_reclaim(struct gh_rm_mem_parcel *mem_parcel)
+{
+ struct qcom_scm_vmperm new_perms;
+ u16 this_vmid;
+ u64 src = 0;
+ int ret, i, n;
+
+ ret = gh_rm_get_vmid(&this_vmid);
+ if (ret)
+ return ret;
+
+ if (this_vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ new_perms.vmid = this_vmid;
+ else
+ new_perms.vmid = QCOM_SCM_RM_MANAGED_VMID;
+ new_perms.perm = QCOM_SCM_PERM_EXEC | QCOM_SCM_PERM_WRITE | QCOM_SCM_PERM_READ;
+
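+ /* The parcel is reclaimed from every VM listed in the ACL */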
+ for (n = 0; n < mem_parcel->n_acl_entries; n++) {
+ if (mem_parcel->acl_entries[n].vmid <= QCOM_SCM_MAX_MANAGED_VMID)
+ src |= (1ull << mem_parcel->acl_entries[n].vmid);
+ else
+ src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
+ }
+
+ for (i = 0; i < mem_parcel->n_mem_entries; i++) {
+ ret = qcom_scm_assign_mem(mem_parcel->mem_entries[i].ipa_base,
+ mem_parcel->mem_entries[i].size,
+ &src, &new_perms, 1);
+ WARN_ON_ONCE(ret);
+ }
+
+ return ret;
+}
+
+static struct gunyah_rm_platform_ops qcom_scm_gh_rm_platform_ops = {
+ .pre_mem_share = qcom_scm_gh_rm_pre_mem_share,
+ .post_mem_reclaim = qcom_scm_gh_rm_post_mem_reclaim,
+};
+
static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
{
struct device_node *tcsr;
@@ -1414,6 +1538,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
if (download_mode)
qcom_scm_set_download_mode(true);
+ if (gh_rm_register_platform_ops(&qcom_scm_gh_rm_platform_ops))
+ dev_warn(__scm->dev, "Failed to register Gunyah RM platform ops\n");
+
return 0;
}
@@ -142,6 +142,16 @@ void gh_rm_driver_unregister(struct gh_rm_driver *ghrm_drv);
#define module_gh_rm_driver(ghrm_drv) \
module_driver(ghrm_drv, gh_rm_driver_register, gh_rm_driver_unregister)
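+/**
+ * struct gunyah_rm_platform_ops - Platform hooks run around RM memory operations
+ * @pre_mem_share: called before the resource manager shares a memory parcel with other VMs
+ * @post_mem_reclaim: called after the resource manager reclaims a memory parcel from other VMs
+ */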
+struct gunyah_rm_platform_ops {
+ int (*pre_mem_share)(struct gh_rm_mem_parcel *mem_parcel);
+ int (*post_mem_reclaim)(struct gh_rm_mem_parcel *mem_parcel);
+};
+
#if IS_ENABLED(CONFIG_GUNYAH)
int gh_rm_register_platform_ops(struct gunyah_rm_platform_ops *platform_ops);
void gh_rm_unregister_platform_ops(struct gunyah_rm_platform_ops *platform_ops);