Message ID | 20230120224627.4053418-19-quic_eberman@quicinc.com |
---|---|
State | Superseded |
Series | Drivers for gunyah hypervisor |
* Elliot Berman <quic_eberman@quicinc.com> [2023-01-20 14:46:17]:

> +	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
> +		src_cpy = src;
> +		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
> +					  le64_to_cpu(mem_parcel->mem_entries[i].size),
> +					  &src_cpy, new_perms, mem_parcel->n_acl_entries);
> +		if (ret) {
> +			src = 0;
> +			for (n = 0; n < mem_parcel->n_acl_entries; n++) {
> +				vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
> +				if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
> +					src |= (1ull << vmid);
> +				else
> +					src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
> +			}
> +
> +			new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
> +
> +			for (i--; i >= 0; i--) {
> +				src_cpy = src;
> +				ret = qcom_scm_assign_mem(
> +						le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
> +						le64_to_cpu(mem_parcel->mem_entries[i].size),
> +						&src_cpy, new_perms, 1);
> +				WARN_ON_ONCE(ret);
> +			}

We would be returning 0 incorrectly here, indicating success.

> @@ -1414,6 +1511,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
>  	if (download_mode)
>  		qcom_scm_set_download_mode(true);
>  
> +	if (gh_rm_register_platform_ops(&qcom_scm_gh_rm_platform_ops))
> +		dev_warn(__scm->dev, "Gunyah RM platform ops were already registered\n");
> +

We should remove the above registration in qcom_scm_shutdown().
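Both comments point at concrete fixes. On the first: if the rollback loop in the error path succeeds, its final qcom_scm_assign_mem() call overwrites ret with 0, so pre_mem_share reports success even though sharing failed. A minimal sketch of the error path with the original error preserved, reusing the patch's own variables (the rb_ret local is hypothetical, introduced only for illustration):

	if (ret) {
		int rb_ret;	/* hypothetical: holds rollback status only */

		/* ... rebuild src and new_perms[0] exactly as in the patch ... */

		for (i--; i >= 0; i--) {
			src_cpy = src;
			rb_ret = qcom_scm_assign_mem(
					le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
					le64_to_cpu(mem_parcel->mem_entries[i].size),
					&src_cpy, new_perms, 1);
			WARN_ON_ONCE(rb_ret);
		}
		break;	/* ret still carries the original failure code */
	}

On the second point, a sketch of the balancing unregistration, assuming the series provides a gh_rm_unregister_platform_ops() counterpart to the registration call and that the existing qcom_scm_shutdown() is a suitable place for it:

	static void qcom_scm_shutdown(struct platform_device *pdev)
	{
		/* Balance the registration done in qcom_scm_probe(). */
		gh_rm_unregister_platform_ops(&qcom_scm_gh_rm_platform_ops);

		/* ... existing shutdown work ... */
	}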
On 20/01/2023 22:46, Elliot Berman wrote:

> Qualcomm platforms have a firmware entity which performs access control
> to physical pages. Dynamically started Gunyah virtual machines use the
> QCOM_SCM_RM_MANAGED_VMID for access. Linux thus needs to assign access
> to the memory used by guest VMs. Gunyah doesn't do this operation for us
> since it is the current VM (typically VMID_HLOS) delegating the access
> and not Gunyah itself. Use the Gunyah platform ops to achieve this so
> that only Qualcomm platforms attempt to make the needed SCM calls.
>
> Co-developed-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Prakruthi Deepak Heragu <quic_pheragu@quicinc.com>
> Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
> ---
>  drivers/firmware/Kconfig    |   2 +
>  drivers/firmware/qcom_scm.c | 100 ++++++++++++++++++++++++++++++++++++
>  2 files changed, 102 insertions(+)
>
> diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
> index b59e3041fd62..b888068ff6f2 100644
> --- a/drivers/firmware/Kconfig
> +++ b/drivers/firmware/Kconfig
> @@ -214,6 +214,8 @@ config MTK_ADSP_IPC
>  
>  config QCOM_SCM
>  	tristate
> +	select VIRT_DRIVERS
> +	select GUNYAH_PLATFORM_HOOKS

So far, SCM usage has been as a library of functions to talk to the Secure world; now why is this selecting GUNYAH? It should be the other way round.

>  
>  config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
>  	bool "Qualcomm download mode enabled by default"
> diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
> index 92763dce6477..20a1434087eb 100644
> --- a/drivers/firmware/qcom_scm.c
> +++ b/drivers/firmware/qcom_scm.c
> @@ -17,6 +17,7 @@
>  #include <linux/clk.h>
>  #include <linux/reset-controller.h>
>  #include <linux/arm-smccc.h>
> +#include <linux/gunyah_rsc_mgr.h>
>  
>  #include "qcom_scm.h"
>  
> @@ -27,6 +28,9 @@ module_param(download_mode, bool, 0);
>  #define SCM_HAS_IFACE_CLK	BIT(1)
>  #define SCM_HAS_BUS_CLK		BIT(2)
>  
> +#define QCOM_SCM_RM_MANAGED_VMID	0x3A
> +#define QCOM_SCM_MAX_MANAGED_VMID	0x3F
> +
>  struct qcom_scm {
>  	struct device *dev;
>  	struct clk *core_clk;
> @@ -1292,6 +1296,99 @@ int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
>  }
>  EXPORT_SYMBOL(qcom_scm_lmh_dcvsh);
>  
> +static int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)

Why can't this be an exported function like the other scm interfaces? We do not need a redirection here, tbh. That would also remove the need for gunyah_platform_hooks.c altogether, and you could call the scm functions directly. Correct me if this is not the case.
> +{
> +	struct qcom_scm_vmperm *new_perms;
> +	u64 src, src_cpy;
> +	int ret = 0, i, n;
> +	u16 vmid;
> +
> +	new_perms = kcalloc(mem_parcel->n_acl_entries, sizeof(*new_perms), GFP_KERNEL);
> +	if (!new_perms)
> +		return -ENOMEM;
> +
> +	for (n = 0; n < mem_parcel->n_acl_entries; n++) {
> +		vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
> +		if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
> +			new_perms[n].vmid = vmid;
> +		else
> +			new_perms[n].vmid = QCOM_SCM_RM_MANAGED_VMID;
> +		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_X)
> +			new_perms[n].perm |= QCOM_SCM_PERM_EXEC;
> +		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_W)
> +			new_perms[n].perm |= QCOM_SCM_PERM_WRITE;
> +		if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_R)
> +			new_perms[n].perm |= QCOM_SCM_PERM_READ;
> +	}
> +
> +	src = (1ull << QCOM_SCM_VMID_HLOS);
> +
> +	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
> +		src_cpy = src;
> +		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
> +					  le64_to_cpu(mem_parcel->mem_entries[i].size),
> +					  &src_cpy, new_perms, mem_parcel->n_acl_entries);
> +		if (ret) {
> +			src = 0;
> +			for (n = 0; n < mem_parcel->n_acl_entries; n++) {
> +				vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
> +				if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
> +					src |= (1ull << vmid);
> +				else
> +					src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
> +			}
> +
> +			new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
> +
> +			for (i--; i >= 0; i--) {
> +				src_cpy = src;
> +				ret = qcom_scm_assign_mem(
> +						le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
> +						le64_to_cpu(mem_parcel->mem_entries[i].size),
> +						&src_cpy, new_perms, 1);
> +				WARN_ON_ONCE(ret);
> +			}
> +			break;
> +		}
> +	}
> +
> +	kfree(new_perms);
> +	return ret;
> +}
> +
> +static int qcom_scm_gh_rm_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
> +{
> +	struct qcom_scm_vmperm new_perms;
> +	u64 src = 0;
> +	int ret = 0, i, n;
> +	u16 vmid;
> +
> +	new_perms.vmid = QCOM_SCM_VMID_HLOS;
> +	new_perms.perm = QCOM_SCM_PERM_EXEC | QCOM_SCM_PERM_WRITE | QCOM_SCM_PERM_READ;
> +
> +	for (n = 0; n < mem_parcel->n_acl_entries; n++) {
> +		vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
> +		if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
> +			src |= (1ull << vmid);
> +		else
> +			src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
> +	}
> +
> +	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
> +		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
> +					  le64_to_cpu(mem_parcel->mem_entries[i].size),
> +					  &src, &new_perms, 1);
> +		WARN_ON_ONCE(ret);
> +	}
> +
> +	return ret;
> +}
> +
> +static struct gunyah_rm_platform_ops qcom_scm_gh_rm_platform_ops = {
> +	.pre_mem_share = qcom_scm_gh_rm_pre_mem_share,
> +	.post_mem_reclaim = qcom_scm_gh_rm_post_mem_reclaim,
> +};
> +
>  static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
>  {
>  	struct device_node *tcsr;
> @@ -1414,6 +1511,9 @@ static int qcom_scm_probe(struct platform_device *pdev)
>  	if (download_mode)
>  		qcom_scm_set_download_mode(true);
>  
> +	if (gh_rm_register_platform_ops(&qcom_scm_gh_rm_platform_ops))
> +		dev_warn(__scm->dev, "Gunyah RM platform ops were already registered\n");
> +
>  	return 0;
>  }
> 
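Two alternatives are being suggested here. The Kconfig comment asks for the dependency to be inverted: instead of the SCM library selecting Gunyah infrastructure, the Gunyah hooks would pull in QCOM_SCM. A hypothetical sketch of that direction (the exact symbol relationship is an assumption):

	config GUNYAH_PLATFORM_HOOKS
		tristate
		depends on QCOM_SCM

The second comment suggests dropping the indirection entirely and exporting the hooks like the other qcom_scm_* helpers, so the Gunyah resource manager can call them directly. A rough sketch of that shape (the header placement is an assumption, not part of the patch):

	/* drivers/firmware/qcom_scm.c: drop static, export like the other helpers */
	int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm,
					 struct gh_rm_mem_parcel *mem_parcel)
	{
		/* ... body unchanged from the patch ... */
	}
	EXPORT_SYMBOL(qcom_scm_gh_rm_pre_mem_share);

	/* include/linux/qcom_scm.h (hypothetical placement): */
	int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm,
					 struct gh_rm_mem_parcel *mem_parcel);

With that, the resource manager would invoke the SCM calls directly at share/reclaim time, and both the registration in qcom_scm_probe() and gunyah_platform_hooks.c could go away.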