
[v2,2/2] firmware: qcom_scm: Support multiple waitq contexts

Message ID b736ba58b4a4164f7232de7e075102cd2818b1e7.1724968351.git.quic_uchalich@quicinc.com
State New
Series SCM: Support latest version of waitq-aware firmware

Commit Message

Unnathi Chalicheemala Aug. 29, 2024, 10:15 p.m. UTC
Currently, only a single waitqueue context exists, with waitqueue id zero.
Firmware now implements a multi-waitqueue mechanism to handle the case
where multiple VMs make SMC calls, or a single VM makes multiple calls,
on the same CPU.

When a VM makes an SMC call, firmware allocates a waitqueue context on
the assumption that the call may block. SMC calls that cannot acquire
the required resource are returned to sleep in the calling VM; once the
resource becomes available, the VM is notified to wake the sleeping
thread and resume the SMC call. SM8650 firmware can allocate two such
waitqueue contexts, so create both of them.

Support the unique waitqueue contexts with a dynamically sized array in
which each wq_ctx is associated with a struct completion for easy
lookup. Introduce qcom_scm_query_waitq_count() to read the number of
waitqueue contexts directly from firmware. On older targets that
support only a single waitqueue, the underlying SCM call is not
implemented, so wq_cnt is set to 1.

Signed-off-by: Unnathi Chalicheemala <quic_uchalich@quicinc.com>
---
 drivers/firmware/qcom/qcom_scm.c | 82 +++++++++++++++++++++++---------
 1 file changed, 60 insertions(+), 22 deletions(-)

Comments

Unnathi Chalicheemala Sept. 16, 2024, 11:57 p.m. UTC | #1
On 9/4/2024 2:54 PM, Bjorn Andersson wrote:
> On Thu, Aug 29, 2024 at 03:15:55PM GMT, Unnathi Chalicheemala wrote:
>> Currently, only a single waitqueue context exists, with waitqueue id zero.
>> Firmware now implements a multi-waitqueue mechanism to handle the case
>> where multiple VMs make SMC calls, or a single VM makes multiple calls,
>> on the same CPU.
>>
>> When a VM makes an SMC call, firmware allocates a waitqueue context on
>> the assumption that the call may block. SMC calls that cannot acquire
>> the required resource are returned to sleep in the calling VM; once the
>> resource becomes available, the VM is notified to wake the sleeping
>> thread and resume the SMC call. SM8650 firmware can allocate two such
>> waitqueue contexts, so create both of them.
>>
>> Support the unique waitqueue contexts with a dynamically sized array in
>> which each wq_ctx is associated with a struct completion for easy
>> lookup. Introduce qcom_scm_query_waitq_count() to read the number of
>> waitqueue contexts directly from firmware. On older targets that
>> support only a single waitqueue, the underlying SCM call is not
>> implemented, so wq_cnt is set to 1.
>>
>> Signed-off-by: Unnathi Chalicheemala <quic_uchalich@quicinc.com>
>> ---
>>  drivers/firmware/qcom/qcom_scm.c | 82 +++++++++++++++++++++++---------
>>  1 file changed, 60 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
>> index ed51fbb1c065..b2c5505de681 100644
>> --- a/drivers/firmware/qcom/qcom_scm.c
>> +++ b/drivers/firmware/qcom/qcom_scm.c
>> @@ -44,12 +44,13 @@ static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
>>  module_param(download_mode, bool, 0);
>>  
>>  struct qcom_scm {
>> +	int wq_cnt;
> 
> Does it make sense for this to be negative? Please make it unsigned.
> 
> Also, might not be the most significant member of this struct, so
> perhaps you can move it further down?
> 
Ack.
>>  	struct device *dev;
>>  	struct clk *core_clk;
>>  	struct clk *iface_clk;
>>  	struct clk *bus_clk;
>>  	struct icc_path *path;
>> -	struct completion waitq_comp;
>> +	struct completion *waitq;
>>  	struct reset_controller_dev reset;
>>  
>>  	/* control access to the interconnect path */
>> @@ -1850,6 +1851,31 @@ static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 virq)
>>  	return 0;
>>  }
>>  
>> +static int qcom_scm_query_waitq_count(void)
>> +{
>> +	bool avail;
>> +	int count;
>> +	int ret;
>> +	struct qcom_scm_desc desc = {
>> +		.svc = QCOM_SCM_SVC_WAITQ,
>> +		.cmd = QCOM_SCM_WAITQ_GET_INFO,
>> +		.owner = ARM_SMCCC_OWNER_SIP
>> +	};
>> +	struct qcom_scm_res res;
>> +
>> +	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_INFO);
>> +	if (!avail) {
>> +		count = 1;
>> +		return count;
> 
> count is a local variable, so just return count; and drop the {} please.
> 
> 
> Perhaps even drop the local boolean variable:
> 
> 	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_INFO))
> 		return 1;
> 
Ack.
>> +	}
>> +
>> +	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
>> +	if (ret)
> 
> (Keep this local variable, as that's in line with the style...)
> 
>> +		return ret;
>> +	count = res.result[0] & 0xff;
>> +	return count;
> 
> Again, return res.result[0] & 0xff; should be sufficient, no need for a
> local variable immediately followed by a return statement.
> 
Ack.
>> +}
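Folding in both of the above simplifications, the query helper could shrink to something like this (just a sketch of what v3 might look like, not the posted code):

static int qcom_scm_query_waitq_count(void)
{
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_WAITQ,
		.cmd = QCOM_SCM_WAITQ_GET_INFO,
		.owner = ARM_SMCCC_OWNER_SIP
	};
	struct qcom_scm_res res;
	int ret;

	/* Older firmware has no GET_INFO call, so assume a single waitqueue. */
	if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_WAITQ,
					  QCOM_SCM_WAITQ_GET_INFO))
		return 1;

	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
	if (ret)
		return ret;

	/* The waitqueue count is reported in the low byte of the result. */
	return res.result[0] & 0xff;
}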
>> +
>>  static int qcom_scm_get_waitq_irq(void)
>>  {
>>  	int ret;
>> @@ -1876,42 +1902,40 @@ static int qcom_scm_get_waitq_irq(void)
>>  	return ret;
>>  }
>>  
>> -static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
>> +static struct completion *qcom_scm_get_completion(u32 wq_ctx)
>>  {
>> -	/* FW currently only supports a single wq_ctx (zero).
>> -	 * TODO: Update this logic to include dynamic allocation and lookup of
>> -	 * completion structs when FW supports more wq_ctx values.
>> -	 */
>> -	if (wq_ctx != 0) {
>> -		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
>> -		return -EINVAL;
>> -	}
>> +	struct completion *wq;
>>  
>> -	return 0;
>> +	if (wq_ctx >= __scm->wq_cnt)
> 
> I'm guessing that we're not expecting to ever hit this, but if we do, we
> will fail a qcom_scm_call() or qcom_scm_call_atomic() call, giving
> someone down the road a bad week of debugging...
> 
> How about wrapping the conditional in a WARN_ON_ONCE()?
> 
Yes understood, ack.
>> +		return ERR_PTR(-EINVAL);
>> +
>> +	wq = &__scm->waitq[wq_ctx];
>> +
>> +	return wq;
>>  }
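With the WARN_ON_ONCE() wrapper suggested above, the lookup helper would end up roughly as follows (sketch only):

static struct completion *qcom_scm_get_completion(u32 wq_ctx)
{
	/* Firmware should never hand us an out-of-range context; warn loudly if it does. */
	if (WARN_ON_ONCE(wq_ctx >= __scm->wq_cnt))
		return ERR_PTR(-EINVAL);

	return &__scm->waitq[wq_ctx];
}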
>>  
>>  int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
>>  {
>> -	int ret;
>> +	struct completion *wq;
>>  
>> -	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
>> -	if (ret)
>> -		return ret;
>> +	wq = qcom_scm_get_completion(wq_ctx);
>> +	if (IS_ERR(wq))
>> +		return PTR_ERR(wq);
>>  
>> -	wait_for_completion(&__scm->waitq_comp);
>> +	wait_for_completion(wq);
>>  
>>  	return 0;
>>  }
>>  
>>  static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
>>  {
>> -	int ret;
>> +	struct completion *wq;
>>  
>> -	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
>> -	if (ret)
>> -		return ret;
>> +	wq = qcom_scm_get_completion(wq_ctx);
>> +	if (IS_ERR(wq))
>> +		return PTR_ERR(wq);
>>  
>> -	complete(&__scm->waitq_comp);
>> +	complete(wq);
>>  
>>  	return 0;
>>  }
>> @@ -1948,6 +1972,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
>>  	struct qcom_tzmem_pool_config pool_config;
>>  	struct qcom_scm *scm;
>>  	int irq, ret;
>> +	int i;
>>  
>>  	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
>>  	if (!scm)
>> @@ -1958,7 +1983,6 @@ static int qcom_scm_probe(struct platform_device *pdev)
>>  	if (ret < 0)
>>  		return ret;
>>  
>> -	init_completion(&scm->waitq_comp);
>>  	mutex_init(&scm->scm_bw_lock);
>>  
>>  	scm->path = devm_of_icc_get(&pdev->dev, NULL);
>> @@ -1993,6 +2017,20 @@ static int qcom_scm_probe(struct platform_device *pdev)
>>  	/* Let all above stores be available after this */
>>  	smp_store_release(&__scm, scm);
> 
> Should have spotted this earlier... But if any code below this point
> takes an error path (i.e. we return non-0 from hereon) devres will free
> __scm and anyone calling the qcom_scm API will hit a use-after-free.
> 
> Add to that it doesn't seem like a good idea to have
> qcom_scm_is_available() return true until we have setup the wait queue
> count or setup tzmem at least.
> 
Would the other calls below that can take an error path also need to be
moved before the smp_store_release()? Just wondering if that should be
fixed in a separate patch.

And I think moving the waitq initialization before the smp_store_release()
should be okay.
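Roughly what I have in mind for the probe ordering, assuming the count query is changed to take the scm pointer so it can run before __scm is published (sketch only, not tested):

	ret = qcom_scm_query_waitq_count(scm);	/* hypothetical signature taking scm */
	if (ret < 0)
		return ret;
	scm->wq_cnt = ret;

	scm->waitq = devm_kcalloc(&pdev->dev, scm->wq_cnt,
				  sizeof(*scm->waitq), GFP_KERNEL);
	if (!scm->waitq)
		return -ENOMEM;

	for (i = 0; i < scm->wq_cnt; i++)
		init_completion(&scm->waitq[i]);

	/* Publish scm only after the waitqueue state is fully set up. */
	smp_store_release(&__scm, scm);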
>>  
>> +	platform_set_drvdata(pdev, scm);
> 
> I believe this is a leftover from previous versions of this patch?
> 
Yes, will remove this.
> Regards,
> Bjorn
> 
>> +	ret = qcom_scm_query_waitq_count();
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	scm->wq_cnt = ret;
>> +
>> +	scm->waitq = devm_kcalloc(&pdev->dev, scm->wq_cnt, sizeof(*scm->waitq), GFP_KERNEL);
>> +	if (!scm->waitq)
>> +		return -ENOMEM;
>> +
>> +	for (i = 0; i < scm->wq_cnt; i++)
>> +		init_completion(&scm->waitq[i]);
>> +
>>  	irq = qcom_scm_get_waitq_irq();
>>  	if (irq < 0) {
>>  		if (irq != -ENXIO)
>> -- 
>> 2.34.1
>>
>

Patch

diff --git a/drivers/firmware/qcom/qcom_scm.c b/drivers/firmware/qcom/qcom_scm.c
index ed51fbb1c065..b2c5505de681 100644
--- a/drivers/firmware/qcom/qcom_scm.c
+++ b/drivers/firmware/qcom/qcom_scm.c
@@ -44,12 +44,13 @@  static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
 module_param(download_mode, bool, 0);
 
 struct qcom_scm {
+	int wq_cnt;
 	struct device *dev;
 	struct clk *core_clk;
 	struct clk *iface_clk;
 	struct clk *bus_clk;
 	struct icc_path *path;
-	struct completion waitq_comp;
+	struct completion *waitq;
 	struct reset_controller_dev reset;
 
 	/* control access to the interconnect path */
@@ -1850,6 +1851,31 @@  static int qcom_scm_fill_irq_fwspec_params(struct irq_fwspec *fwspec, u32 virq)
 	return 0;
 }
 
+static int qcom_scm_query_waitq_count(void)
+{
+	bool avail;
+	int count;
+	int ret;
+	struct qcom_scm_desc desc = {
+		.svc = QCOM_SCM_SVC_WAITQ,
+		.cmd = QCOM_SCM_WAITQ_GET_INFO,
+		.owner = ARM_SMCCC_OWNER_SIP
+	};
+	struct qcom_scm_res res;
+
+	avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_INFO);
+	if (!avail) {
+		count = 1;
+		return count;
+	}
+
+	ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
+	if (ret)
+		return ret;
+	count = res.result[0] & 0xff;
+	return count;
+}
+
 static int qcom_scm_get_waitq_irq(void)
 {
 	int ret;
@@ -1876,42 +1902,40 @@  static int qcom_scm_get_waitq_irq(void)
 	return ret;
 }
 
-static int qcom_scm_assert_valid_wq_ctx(u32 wq_ctx)
+static struct completion *qcom_scm_get_completion(u32 wq_ctx)
 {
-	/* FW currently only supports a single wq_ctx (zero).
-	 * TODO: Update this logic to include dynamic allocation and lookup of
-	 * completion structs when FW supports more wq_ctx values.
-	 */
-	if (wq_ctx != 0) {
-		dev_err(__scm->dev, "Firmware unexpectedly passed non-zero wq_ctx\n");
-		return -EINVAL;
-	}
+	struct completion *wq;
 
-	return 0;
+	if (wq_ctx >= __scm->wq_cnt)
+		return ERR_PTR(-EINVAL);
+
+	wq = &__scm->waitq[wq_ctx];
+
+	return wq;
 }
 
 int qcom_scm_wait_for_wq_completion(u32 wq_ctx)
 {
-	int ret;
+	struct completion *wq;
 
-	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
-	if (ret)
-		return ret;
+	wq = qcom_scm_get_completion(wq_ctx);
+	if (IS_ERR(wq))
+		return PTR_ERR(wq);
 
-	wait_for_completion(&__scm->waitq_comp);
+	wait_for_completion(wq);
 
 	return 0;
 }
 
 static int qcom_scm_waitq_wakeup(unsigned int wq_ctx)
 {
-	int ret;
+	struct completion *wq;
 
-	ret = qcom_scm_assert_valid_wq_ctx(wq_ctx);
-	if (ret)
-		return ret;
+	wq = qcom_scm_get_completion(wq_ctx);
+	if (IS_ERR(wq))
+		return PTR_ERR(wq);
 
-	complete(&__scm->waitq_comp);
+	complete(wq);
 
 	return 0;
 }
@@ -1948,6 +1972,7 @@  static int qcom_scm_probe(struct platform_device *pdev)
 	struct qcom_tzmem_pool_config pool_config;
 	struct qcom_scm *scm;
 	int irq, ret;
+	int i;
 
 	scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
 	if (!scm)
@@ -1958,7 +1983,6 @@  static int qcom_scm_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
-	init_completion(&scm->waitq_comp);
 	mutex_init(&scm->scm_bw_lock);
 
 	scm->path = devm_of_icc_get(&pdev->dev, NULL);
@@ -1993,6 +2017,20 @@  static int qcom_scm_probe(struct platform_device *pdev)
 	/* Let all above stores be available after this */
 	smp_store_release(&__scm, scm);
 
+	platform_set_drvdata(pdev, scm);
+	ret = qcom_scm_query_waitq_count();
+	if (ret < 0)
+		return ret;
+
+	scm->wq_cnt = ret;
+
+	scm->waitq = devm_kcalloc(&pdev->dev, scm->wq_cnt, sizeof(*scm->waitq), GFP_KERNEL);
+	if (!scm->waitq)
+		return -ENOMEM;
+
+	for (i = 0; i < scm->wq_cnt; i++)
+		init_completion(&scm->waitq[i]);
+
 	irq = qcom_scm_get_waitq_irq();
 	if (irq < 0) {
 		if (irq != -ENXIO)