Message ID | 20220905155759.17743-1-sumitg@nvidia.com |
---|---|
State | Superseded |
Series | cpufreq: tegra239: Add support for T239 |
On Mon, Sep 05, 2022 at 09:27:59PM +0530, Sumit Gupta wrote:
> Adding support for Tegra239 SoC which has eight cores in
> a single cluster. Also, moving num_clusters to soc data
> to avoid over allocating memory for four clusters always.
>
> Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
> ---
>  drivers/cpufreq/tegra194-cpufreq.c | 29 +++++++++++++++--------------
>  1 file changed, 15 insertions(+), 14 deletions(-)

The subject is a little confusing. Typically the prefix refers to the
driver, so it would be something like "cpufreq: tegra194: ".

Furthermore, please always spell out Tegra239 for consistency. This
makes it easier to grep for.

Otherwise, looks good. So with the above fixed, this is:

Acked-by: Thierry Reding <treding@nvidia.com>

>
> diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
> index 1216046cf4c2..f38a760da61b 100644
> --- a/drivers/cpufreq/tegra194-cpufreq.c
> +++ b/drivers/cpufreq/tegra194-cpufreq.c
> @@ -38,14 +38,6 @@
>  /* cpufreq transisition latency */
>  #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
>
> -enum cluster {
> -        CLUSTER0,
> -        CLUSTER1,
> -        CLUSTER2,
> -        CLUSTER3,
> -        MAX_CLUSTERS,
> -};
> -
>  struct tegra_cpu_ctr {
>          u32 cpu;
>          u32 coreclk_cnt, last_coreclk_cnt;
> @@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
>  struct tegra_cpufreq_soc {
>          struct tegra_cpufreq_ops *ops;
>          int maxcpus_per_cluster;
> +        size_t num_clusters;
>          phys_addr_t actmon_cntr_base;
>  };
>
>  struct tegra194_cpufreq_data {
>          void __iomem *regs;
> -        size_t num_clusters;
>          struct cpufreq_frequency_table **tables;
>          const struct tegra_cpufreq_soc *soc;
>  };
> @@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
>          .ops = &tegra234_cpufreq_ops,
>          .actmon_cntr_base = 0x9000,
>          .maxcpus_per_cluster = 4,
> +        .num_clusters = 3,
> +};
> +
> +const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
> +        .ops = &tegra234_cpufreq_ops,
> +        .actmon_cntr_base = 0x4000,
> +        .maxcpus_per_cluster = 8,
> +        .num_clusters = 1,
>  };
>
>  static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
> @@ -382,7 +382,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
>
>          data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
>
> -        if (clusterid >= data->num_clusters || !data->tables[clusterid])
> +        if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
>                  return -EINVAL;
>
>          start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
> @@ -433,6 +433,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
>  static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
>          .ops = &tegra194_cpufreq_ops,
>          .maxcpus_per_cluster = 2,
> +        .num_clusters = 4,
>  };
>
>  static void tegra194_cpufreq_free_resources(void)
> @@ -525,15 +526,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
>
>          soc = of_device_get_match_data(&pdev->dev);
>
> -        if (soc->ops && soc->maxcpus_per_cluster) {
> +        if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
>                  data->soc = soc;
>          } else {
>                  dev_err(&pdev->dev, "soc data missing\n");
>                  return -EINVAL;
>          }
>
> -        data->num_clusters = MAX_CLUSTERS;
> -        data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
> +        data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
>                                      sizeof(*data->tables), GFP_KERNEL);
>          if (!data->tables)
>                  return -ENOMEM;
> @@ -558,7 +558,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
>                  goto put_bpmp;
>          }
>
> -        for (i = 0; i < data->num_clusters; i++) {
> +        for (i = 0; i < data->soc->num_clusters; i++) {
>                  data->tables[i] = init_freq_table(pdev, bpmp, i);
>                  if (IS_ERR(data->tables[i])) {
>                          err = PTR_ERR(data->tables[i]);
> @@ -590,6 +590,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
>  static const struct of_device_id tegra194_cpufreq_of_match[] = {
>          { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
>          { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
> +        { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
>          { /* sentinel */ }
>  };
>
> --
> 2.17.1
>
On Mon, Sep 05, 2022 at 09:27:59PM +0530, Sumit Gupta wrote:
> Adding support for Tegra239 SoC which has eight cores in
> a single cluster. Also, moving num_clusters to soc data

Found two more minor things: s/soc/SoC/ for consistent spelling.

> to avoid over allocating memory for four clusters always.
>
> Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
> ---
>  drivers/cpufreq/tegra194-cpufreq.c | 29 +++++++++++++++--------------
>  1 file changed, 15 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
> index 1216046cf4c2..f38a760da61b 100644
> --- a/drivers/cpufreq/tegra194-cpufreq.c
> +++ b/drivers/cpufreq/tegra194-cpufreq.c
> @@ -38,14 +38,6 @@
>  /* cpufreq transisition latency */
>  #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
>
> -enum cluster {
> -        CLUSTER0,
> -        CLUSTER1,
> -        CLUSTER2,
> -        CLUSTER3,
> -        MAX_CLUSTERS,
> -};
> -
>  struct tegra_cpu_ctr {
>          u32 cpu;
>          u32 coreclk_cnt, last_coreclk_cnt;
> @@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
>  struct tegra_cpufreq_soc {
>          struct tegra_cpufreq_ops *ops;
>          int maxcpus_per_cluster;
> +        size_t num_clusters;

Might want to make this unsigned int while at it. size_t is preferred
for things that actually refer to the size of something, while this is
a count, so unsigned int fits slightly better. It's admittedly a bit
pedantic...

Thierry
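For reference, the follow-up suggestion amounts to a one-line type change in the per-SoC data. A minimal sketch of what that would look like (the actual v2 patch may differ):

```c
/* Per-SoC description used by the tegra194-cpufreq driver. */
struct tegra_cpufreq_soc {
        struct tegra_cpufreq_ops *ops;
        int maxcpus_per_cluster;
        unsigned int num_clusters;      /* a count of clusters, so unsigned int rather than size_t */
        phys_addr_t actmon_cntr_base;
};
```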
Hi Thierry,

Thank you for the review. I will do the change and send v2 soon.

Regards,
Sumit Gupta
```diff
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 1216046cf4c2..f38a760da61b 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -38,14 +38,6 @@
 /* cpufreq transisition latency */
 #define TEGRA_CPUFREQ_TRANSITION_LATENCY (300 * 1000) /* unit in nanoseconds */
 
-enum cluster {
-        CLUSTER0,
-        CLUSTER1,
-        CLUSTER2,
-        CLUSTER3,
-        MAX_CLUSTERS,
-};
-
 struct tegra_cpu_ctr {
         u32 cpu;
         u32 coreclk_cnt, last_coreclk_cnt;
@@ -67,12 +59,12 @@ struct tegra_cpufreq_ops {
 struct tegra_cpufreq_soc {
         struct tegra_cpufreq_ops *ops;
         int maxcpus_per_cluster;
+        size_t num_clusters;
         phys_addr_t actmon_cntr_base;
 };
 
 struct tegra194_cpufreq_data {
         void __iomem *regs;
-        size_t num_clusters;
         struct cpufreq_frequency_table **tables;
         const struct tegra_cpufreq_soc *soc;
 };
@@ -166,6 +158,14 @@ static const struct tegra_cpufreq_soc tegra234_cpufreq_soc = {
         .ops = &tegra234_cpufreq_ops,
         .actmon_cntr_base = 0x9000,
         .maxcpus_per_cluster = 4,
+        .num_clusters = 3,
+};
+
+const struct tegra_cpufreq_soc tegra239_cpufreq_soc = {
+        .ops = &tegra234_cpufreq_ops,
+        .actmon_cntr_base = 0x4000,
+        .maxcpus_per_cluster = 8,
+        .num_clusters = 1,
 };
 
 static void tegra194_get_cpu_cluster_id(u32 cpu, u32 *cpuid, u32 *clusterid)
@@ -382,7 +382,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
 
         data->soc->ops->get_cpu_cluster_id(policy->cpu, NULL, &clusterid);
 
-        if (clusterid >= data->num_clusters || !data->tables[clusterid])
+        if (clusterid >= data->soc->num_clusters || !data->tables[clusterid])
                 return -EINVAL;
 
         start_cpu = rounddown(policy->cpu, maxcpus_per_cluster);
@@ -433,6 +433,7 @@ static struct tegra_cpufreq_ops tegra194_cpufreq_ops = {
 static const struct tegra_cpufreq_soc tegra194_cpufreq_soc = {
         .ops = &tegra194_cpufreq_ops,
         .maxcpus_per_cluster = 2,
+        .num_clusters = 4,
 };
 
 static void tegra194_cpufreq_free_resources(void)
@@ -525,15 +526,14 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
 
         soc = of_device_get_match_data(&pdev->dev);
 
-        if (soc->ops && soc->maxcpus_per_cluster) {
+        if (soc->ops && soc->maxcpus_per_cluster && soc->num_clusters) {
                 data->soc = soc;
         } else {
                 dev_err(&pdev->dev, "soc data missing\n");
                 return -EINVAL;
         }
 
-        data->num_clusters = MAX_CLUSTERS;
-        data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
+        data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
                                     sizeof(*data->tables), GFP_KERNEL);
         if (!data->tables)
                 return -ENOMEM;
@@ -558,7 +558,7 @@ static int tegra194_cpufreq_probe(struct platform_device *pdev)
                 goto put_bpmp;
         }
 
-        for (i = 0; i < data->num_clusters; i++) {
+        for (i = 0; i < data->soc->num_clusters; i++) {
                 data->tables[i] = init_freq_table(pdev, bpmp, i);
                 if (IS_ERR(data->tables[i])) {
                         err = PTR_ERR(data->tables[i]);
@@ -590,6 +590,7 @@ static int tegra194_cpufreq_remove(struct platform_device *pdev)
 static const struct of_device_id tegra194_cpufreq_of_match[] = {
         { .compatible = "nvidia,tegra194-ccplex", .data = &tegra194_cpufreq_soc },
         { .compatible = "nvidia,tegra234-ccplex-cluster", .data = &tegra234_cpufreq_soc },
+        { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc },
         { /* sentinel */ }
 };
```
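One practical consequence of the eight-core, single-cluster layout: in tegra194_cpufreq_init() above, each CPU is rounded down to the first CPU of its cluster, so with maxcpus_per_cluster = 8 all eight Tegra239 cores end up sharing one cpufreq policy. A standalone sketch of that arithmetic (illustration only, not driver code):

```c
#include <stdio.h>

/* Same rounding the driver performs with rounddown(policy->cpu, maxcpus_per_cluster). */
static unsigned int cluster_start_cpu(unsigned int cpu, unsigned int maxcpus_per_cluster)
{
        return cpu - (cpu % maxcpus_per_cluster);
}

int main(void)
{
        const unsigned int maxcpus_per_cluster = 8; /* Tegra239 value from the patch */

        for (unsigned int cpu = 0; cpu < 8; cpu++)
                printf("cpu%u -> start_cpu %u\n",
                       cpu, cluster_start_cpu(cpu, maxcpus_per_cluster));

        /* Every core maps to start_cpu 0, i.e. a single policy spans the whole cluster. */
        return 0;
}
```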
Adding support for Tegra239 SoC which has eight cores in
a single cluster. Also, moving num_clusters to soc data
to avoid over allocating memory for four clusters always.

Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
 drivers/cpufreq/tegra194-cpufreq.c | 29 +++++++++++++++--------------
 1 file changed, 15 insertions(+), 14 deletions(-)
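The memory the commit message says is saved comes from sizing the per-cluster frequency-table array with the SoC-specific cluster count instead of the old fixed MAX_CLUSTERS of four. Simplified from the diff above (error handling omitted):

```c
/* Before: the table array was always sized for four clusters. */
data->num_clusters = MAX_CLUSTERS;
data->tables = devm_kcalloc(&pdev->dev, data->num_clusters,
                            sizeof(*data->tables), GFP_KERNEL);

/* After: sized by the matched SoC's actual cluster count (1 on Tegra239). */
data->tables = devm_kcalloc(&pdev->dev, data->soc->num_clusters,
                            sizeof(*data->tables), GFP_KERNEL);
```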