diff mbox series

[v1,7/7] arm64: dts: sdm845: wireup the thermal trip points to cpufreq

Message ID 6c5b26e65be18222587724e066fc2e39b9f60397.1547078153.git.amit.kucheria@linaro.org
State New
Headers show
Series [v1,1/7] drivers: thermal: of-thermal: Print name of device node with error | expand

Commit Message

Amit Kucheria Jan. 10, 2019, midnight UTC
Since the big and little cpus are in the same frequency domain, use all
of them for mitigation in the cooling-map. At the lower trip points we
restrict ourselves to throttling only a few OPPs. At higher trip
temperatures, allow ourselves to be throttled to any extent.

Signed-off-by: Amit Kucheria <amit.kucheria@linaro.org>

---
 arch/arm64/boot/dts/qcom/sdm845.dtsi | 145 +++++++++++++++++++++++++++
 1 file changed, 145 insertions(+)

-- 
2.17.1

Comments

Matthias Kaehlcke Jan. 10, 2019, 2:22 a.m. UTC | #1
Hi Amit,

On Thu, Jan 10, 2019 at 05:30:56AM +0530, Amit Kucheria wrote:
> Since the big and little cpus are in the same frequency domain, use all

> of them for mitigation in the cooling-map. At the lower trip points we

> restrict ourselves to throttling only a few OPPs. At higher trip

> temperatures, allow ourselves to be throttled to any extent.

> 

> Signed-off-by: Amit Kucheria <amit.kucheria@linaro.org>

> ---

>  arch/arm64/boot/dts/qcom/sdm845.dtsi | 145 +++++++++++++++++++++++++++

>  1 file changed, 145 insertions(+)

> 

> diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi

> index 29e823b0caf4..cd6402a9aa64 100644

> --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi

> +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi

> @@ -13,6 +13,7 @@

>  #include <dt-bindings/reset/qcom,sdm845-aoss.h>

>  #include <dt-bindings/soc/qcom,rpmh-rsc.h>

>  #include <dt-bindings/clock/qcom,gcc-sdm845.h>

> +#include <dt-bindings/thermal/thermal.h>

>  

>  / {

>  	interrupt-parent = <&intc>;

> @@ -99,6 +100,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x0>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_0>;

>  			L2_0: l2-cache {

>  				compatible = "cache";

> @@ -114,6 +116,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x100>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;


This is not needed (this also applies to the other non-policy
cores). A single cpufreq device is created per frequency domain /
cluster, hence a single cooling device is registered per cluster,
which IMO makes sense given that the CPUs of a cluster can't change
their frequencies independently.

>  			next-level-cache = <&L2_100>;

>  			L2_100: l2-cache {

>  				compatible = "cache";

> @@ -126,6 +129,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x200>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_200>;

>  			L2_200: l2-cache {

>  				compatible = "cache";

> @@ -138,6 +142,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x300>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_300>;

>  			L2_300: l2-cache {

>  				compatible = "cache";

> @@ -150,6 +155,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x400>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_400>;

>  			L2_400: l2-cache {

>  				compatible = "cache";

> @@ -162,6 +168,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x500>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_500>;

>  			L2_500: l2-cache {

>  				compatible = "cache";

> @@ -174,6 +181,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x600>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_600>;

>  			L2_600: l2-cache {

>  				compatible = "cache";

> @@ -186,6 +194,7 @@

>  			compatible = "qcom,kryo385";

>  			reg = <0x0 0x700>;

>  			enable-method = "psci";

> +			#cooling-cells = <2>;

>  			next-level-cache = <&L2_700>;

>  			L2_700: l2-cache {

>  				compatible = "cache";

> @@ -1703,6 +1712,23 @@

>  					type = "critical";

>  				};

>  			};

> +

> +			cooling-maps {

> +				map0 {

> +					trip = <&cpu_alert0>;

> +					cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,

> +							 <&CPU1 THERMAL_NO_LIMIT 4>,


As per above, there are no cooling devices for CPU1-3 and CPU5-7.

Cheers

Matthias
Viresh Kumar Jan. 10, 2019, 6:23 a.m. UTC | #2
On 09-01-19, 18:22, Matthias Kaehlcke wrote:
> Hi Amit,

> 

> On Thu, Jan 10, 2019 at 05:30:56AM +0530, Amit Kucheria wrote:

> > Since the big and little cpus are in the same frequency domain, use all

> > of them for mitigation in the cooling-map. At the lower trip points we

> > restrict ourselves to throttling only a few OPPs. At higher trip

> > temperatures, allow ourselves to be throttled to any extent.

> > 

> > Signed-off-by: Amit Kucheria <amit.kucheria@linaro.org>

> > ---

> >  arch/arm64/boot/dts/qcom/sdm845.dtsi | 145 +++++++++++++++++++++++++++

> >  1 file changed, 145 insertions(+)

> > 

> > diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi

> > index 29e823b0caf4..cd6402a9aa64 100644

> > --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi

> > +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi

> > @@ -13,6 +13,7 @@

> >  #include <dt-bindings/reset/qcom,sdm845-aoss.h>

> >  #include <dt-bindings/soc/qcom,rpmh-rsc.h>

> >  #include <dt-bindings/clock/qcom,gcc-sdm845.h>

> > +#include <dt-bindings/thermal/thermal.h>

> >  

> >  / {

> >  	interrupt-parent = <&intc>;

> > @@ -99,6 +100,7 @@

> >  			compatible = "qcom,kryo385";

> >  			reg = <0x0 0x0>;

> >  			enable-method = "psci";

> > +			#cooling-cells = <2>;

> >  			next-level-cache = <&L2_0>;

> >  			L2_0: l2-cache {

> >  				compatible = "cache";

> > @@ -114,6 +116,7 @@

> >  			compatible = "qcom,kryo385";

> >  			reg = <0x0 0x100>;

> >  			enable-method = "psci";

> > +			#cooling-cells = <2>;

> 

> This is not needed (also applies to other for other non-policy

> cores). A single cpufreq device is created per frequency domain /

> cluster, hence a single cooling device is registered per cluster,

> which IMO makes sense given that the CPUs of a cluster can't change

> their frequencies independently.

 
> As per above, there are no cooling devices for CPU1-3 and CPU5-7.


lore.kernel.org/lkml/cover.1527244200.git.viresh.kumar@linaro.org
lore.kernel.org/lkml/b687bb6035fbb010383f4511a206abb4006679fa.1527244201.git.viresh.kumar@linaro.org

-- 
viresh
Matthias Kaehlcke Jan. 11, 2019, 7:58 p.m. UTC | #3
On Fri, Jan 11, 2019 at 09:16:53AM +0530, Viresh Kumar wrote:
> On 10-01-19, 10:42, Matthias Kaehlcke wrote:

> > Thanks for the pointer, there's always something new to learn!

> > 

> > Ok, so the policy CPU and hence the CPU registered as cooling

> > device may vary. I understand that this requires to list all possible

> > cooling devices,

> 

> I won't say that I changed DT because of a design issue with kernel,

> rather the DT shall be complete by itself and that's why that change

> was made.


fair enough

> And then we can have more things going on. For example with cpuidle

> cooling, we can individually control each CPU (and force idle on that)

> even if all CPUs are part of the same freq-domain. Each CPU shall

> expose its capabilities.


Just to gain a better understanding: is cpuidle cooling already
available for arm64 (or is there a patch set)? I came across the
relatively new idle injecting framework but it seems currently the
only user is the Intel powerclamp driver.

> > even though only one will be active at any given

> > time. However I wonder if we could change this:

> 

> I won't say it that way. I see it as all the CPUs are active during a

> cooling state, i.e. they are all participating.


agreed, I was referring to the CPU cooling device, which (without
cpuidle injection) could be considered a single device per freq domain.

> > For device tree based platform the above implies that cooling maps

> > must include a list of all possible cooling devices of a frequency

> > domain, even though only one of them will exist at any given time.

> > 

> > For example:

> > 

> > cooling-maps {

> > 	map0 {

> > 		trip = <&cpu_alert0>;

> > 		cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,

> > 				 <&CPU1 THERMAL_NO_LIMIT 4>,

> > 				 <&CPU2 THERMAL_NO_LIMIT 4>,

> > 				 <&CPU3 THERMAL_NO_LIMIT 4>;

> > 	};

> > 	map1 {

> > 		trip = <&cpu_crit0>;

> > 		cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > 				 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > 				 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > 				 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;

> 

> This is the right thing to do hardware description wise, no matter

> what the kernel does.


Not sure I would call it a hardware description. I'd say we pretend
the thermal configuration is a hardware description so the DT folks
don't yell at us ;-) IMO a CPU cooling device is an abstraction, I
think there is no such IP block on most systems.

It seems with cpuidle injection CPUs can perform cooling actions
individually, with that I agree that representing them as individual
cooling devices in the DT makes sense. Without that a cooling device
per freq domain would seem a reasonable abstraction.

One of the reasons I dislike the above list of cooling devices is that
it is repeated for different thermal-zone/cooling-maps, but I guess
we have to live with that, would be nice if the DT would allow to do
something like this:

thermal-zones {
	cooling_maps_fd0 : cooling-maps {
		map0 {
			trip = <&cpu_alert0>;
			cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,
					 <&CPU1 THERMAL_NO_LIMIT 4>,
					 <&CPU2 THERMAL_NO_LIMIT 4>,
					 <&CPU3 THERMAL_NO_LIMIT 4>;
		};
		map1 {
			trip = <&cpu_crit0>;
			cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
					 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
					 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
					 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
	};

	cpu0-thermal {
		...
		cooling-maps = @cooling_maps_fd0;
		...
	};

	cpu1-thermal {
		...
		cooling-maps = @cooling_maps_fd0;
		...
	};

	...
};

Cheers

Matthias
Viresh Kumar Jan. 14, 2019, 5:59 a.m. UTC | #4
On 11-01-19, 11:58, Matthias Kaehlcke wrote:
> On Fri, Jan 11, 2019 at 09:16:53AM +0530, Viresh Kumar wrote:

> Just to gain a better understanding: is cpuidle cooling already

> available for arm64 (or is there a patch set)? I came across the

> relatively new idle injecting framework but it seems currently the

> only user is the Intel powerclamp driver.


Daniel was trying to upstream it earlier:

lore.kernel.org/lkml/1522945005-7165-7-git-send-email-daniel.lezcano@linaro.org

> > > even though only one will be active at any given

> > > time. However I wonder if we could change this:

> > 

> > I won't say it that way. I see it as all the CPUs are active during a

> > cooling state, i.e. they are all participating.

> 

> agreed, I was referring to the CPU cooling device, which (without

> cpuidle injection) could be considered a single device per freq domain.


Even without cpuidle injection all CPUs actually take part in cooling.

> > > For device tree based platform the above implies that cooling maps

> > > must include a list of all possible cooling devices of a frequency

> > > domain, even though only one of them will exist at any given time.

> > > 

> > > For example:

> > > 

> > > cooling-maps {

> > > 	map0 {

> > > 		trip = <&cpu_alert0>;

> > > 		cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,

> > > 				 <&CPU1 THERMAL_NO_LIMIT 4>,

> > > 				 <&CPU2 THERMAL_NO_LIMIT 4>,

> > > 				 <&CPU3 THERMAL_NO_LIMIT 4>;

> > > 	};

> > > 	map1 {

> > > 		trip = <&cpu_crit0>;

> > > 		cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > > 				 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > > 				 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> > > 				 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;

> > 

> > This is the right thing to do hardware description wise, no matter

> > what the kernel does.

> 

> Not sure I would call it a hardware description. I'd say we pretend

> the thermal configuration is a hardware description so the DT folks

> don't yell at us ;-) IMO a CPU cooling device is an abstraction, I

> think there is no such IP block on most systems.


Right.

> It seems with cpuidle injection CPUs can perform cooling actions

> individually, with that I agree that representing them as individual

> cooling devices in the DT makes sense. Without that a cooling device

> per freq domain would seem a resonable abstraction.


But we actually have 4 different cooling devices no matter what. The only thing
is that they switch their cooling state together. And that shouldn't bother DT
is what I thought :)

> One of the reasons I dislike the above list of cooling devices is that

> it is repeated for different thermal-zone/cooling-maps, but I guess

> we have to live with that, would be nice if the DT would allow to do

> something like this:

> 

> thermal-zones {

> 	cooling_maps_fd0 : cooling-maps {

> 		map0 {

> 			trip = <&cpu_alert0>;

> 			cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,

> 					 <&CPU1 THERMAL_NO_LIMIT 4>,

> 					 <&CPU2 THERMAL_NO_LIMIT 4>,

> 					 <&CPU3 THERMAL_NO_LIMIT 4>;

> 		};

> 		map1 {

> 			trip = <&cpu_crit0>;

> 			cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> 					 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> 					 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,

> 					 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;

> 	};

> 

> 	cpu0-thermal {

> 		...

> 		cooling-maps = @cooling_maps_fd0;

> 		...

> 	};

> 

> 	cpu1-thermal {

> 		...

> 		cooling-maps = @cooling_maps_fd0;

> 		...

> 	};

> 

> 	...

> };


Yeah, maybe. There aren't a lot of examples of such duplication though, if I
remember correctly.

-- 
viresh
Amit Kucheria Jan. 14, 2019, 8:22 a.m. UTC | #5
On Sat, Jan 12, 2019 at 2:06 AM Matthias Kaehlcke <mka@chromium.org> wrote:
>

> Another concern about adding trip points later could be the node

> name. We currently have:

>

>

> trips {

>   cpu0_alert0: trip0 {

>     ...

>   };

>

>   cpu0_crit: trip1 {

>     ...

>   };

> };

>

> If we keep increasing enumeration with the node name this would become:

>

> trips {

>   cpu0_alert0: trip0 {

>     ...

>   };

>

>   cpu0_alert1: trip1 {

>     ...

>   };

>

>   cpu0_crit: trip2 {

>     ...

>   };

> };

>

> i.e. the node name of the critical trip-point changes, which might be

> a concern for dtsi's that override a value, though they should

> probably use the phandle &cpu0_crit anyway. If this is a concern we

> could change the node names to 'alert0' and 'crit'.

>

> I looked around a bit and actually I kinda like the naming scheme used

> by hisilicon/hi6220.dtsi, mediatek/mt8173.dtsi and rockchip/rk3328.dtsi

> (with minor variations):

>

> trips {

>         threshold: trip-point@0 {

>                 temperature = <68000>;

>                 hysteresis = <2000>;

>                 type = "passive";

>         };

>

>         target: trip-point@1 {

>                 temperature = <85000>;

>                 hysteresis = <2000>;

>                 type = "passive";

>         };

>

>         cpu_crit: cpu_crit@0 {

>                 temperature = <115000>;

>                 hysteresis = <2000>;

>                 type = "critical";

>         };

> };

>

> If we were to use this we'd have to adapt it slightly since we have

> multiple thermal zones. In line with the other scheme this could be

> cpuN_threshold, cpuN_target and cpuN_crit.

>


I like this scheme enough that I adopted it for v2.
diff mbox series

Patch

diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 29e823b0caf4..cd6402a9aa64 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -13,6 +13,7 @@ 
 #include <dt-bindings/reset/qcom,sdm845-aoss.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
 #include <dt-bindings/clock/qcom,gcc-sdm845.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
 	interrupt-parent = <&intc>;
@@ -99,6 +100,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x0>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_0>;
 			L2_0: l2-cache {
 				compatible = "cache";
@@ -114,6 +116,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x100>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_100>;
 			L2_100: l2-cache {
 				compatible = "cache";
@@ -126,6 +129,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x200>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_200>;
 			L2_200: l2-cache {
 				compatible = "cache";
@@ -138,6 +142,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x300>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_300>;
 			L2_300: l2-cache {
 				compatible = "cache";
@@ -150,6 +155,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x400>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_400>;
 			L2_400: l2-cache {
 				compatible = "cache";
@@ -162,6 +168,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x500>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_500>;
 			L2_500: l2-cache {
 				compatible = "cache";
@@ -174,6 +181,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x600>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_600>;
 			L2_600: l2-cache {
 				compatible = "cache";
@@ -186,6 +194,7 @@ 
 			compatible = "qcom,kryo385";
 			reg = <0x0 0x700>;
 			enable-method = "psci";
+			#cooling-cells = <2>;
 			next-level-cache = <&L2_700>;
 			L2_700: l2-cache {
 				compatible = "cache";
@@ -1703,6 +1712,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,
+							 <&CPU1 THERMAL_NO_LIMIT 4>,
+							 <&CPU2 THERMAL_NO_LIMIT 4>,
+							 <&CPU3 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit0>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu1-thermal {
@@ -1724,6 +1750,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,
+							 <&CPU1 THERMAL_NO_LIMIT 4>,
+							 <&CPU2 THERMAL_NO_LIMIT 4>,
+							 <&CPU3 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit1>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu2-thermal {
@@ -1745,6 +1788,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert2>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,
+							 <&CPU1 THERMAL_NO_LIMIT 4>,
+							 <&CPU2 THERMAL_NO_LIMIT 4>,
+							 <&CPU3 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit2>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu3-thermal {
@@ -1766,6 +1826,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert3>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT 4>,
+							 <&CPU1 THERMAL_NO_LIMIT 4>,
+							 <&CPU2 THERMAL_NO_LIMIT 4>,
+							 <&CPU3 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit3>;
+					cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu4-thermal {
@@ -1787,6 +1864,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert4>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT 4>,
+							 <&CPU5 THERMAL_NO_LIMIT 4>,
+							 <&CPU6 THERMAL_NO_LIMIT 4>,
+							 <&CPU7 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit4>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu5-thermal {
@@ -1808,6 +1902,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert5>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT 4>,
+							 <&CPU5 THERMAL_NO_LIMIT 4>,
+							 <&CPU6 THERMAL_NO_LIMIT 4>,
+							 <&CPU7 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit5>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu6-thermal {
@@ -1829,6 +1940,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert6>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT 4>,
+							 <&CPU5 THERMAL_NO_LIMIT 4>,
+							 <&CPU6 THERMAL_NO_LIMIT 4>,
+							 <&CPU7 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit6>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 
 		cpu7-thermal {
@@ -1850,6 +1978,23 @@ 
 					type = "critical";
 				};
 			};
+
+			cooling-maps {
+				map0 {
+					trip = <&cpu_alert7>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT 4>,
+							 <&CPU5 THERMAL_NO_LIMIT 4>,
+							 <&CPU6 THERMAL_NO_LIMIT 4>,
+							 <&CPU7 THERMAL_NO_LIMIT 4>;
+				};
+				map1 {
+					trip = <&cpu_crit7>;
+					cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+							 <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+				};
+			};
 		};
 	};
 };