diff mbox series

[RFC,4/8] sched/cpufreq_schedutil: split utilization signals

Message ID 20170523085351.18586-5-juri.lelli@arm.com
State New
Headers show
Series SCHED_DEADLINE freq/cpu invariance and OPP selection | expand

Commit Message

Juri Lelli May 23, 2017, 8:53 a.m. UTC
To be able to treat utilization signals of different scheduling classes
in different ways (e.g., CFS signal might be stale while DEADLINE signal
is never stale by design) we need to split sugov_cpu::util signal in two:
util_cfs and util_dl.

This patch does that by also changing the sugov_get_util() parameter list.
After this change, aggregation of the different signals has to be performed
by sugov_get_util() users (so that they can decide what to do with the
different signals).

Suggested-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Signed-off-by: Juri Lelli <juri.lelli@arm.com>

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Viresh Kumar <viresh.kumar@linaro.org>
Cc: Luca Abeni <luca.abeni@santannapisa.it>
Cc: Claudio Scordino <claudio@evidence.eu.com>
---
 kernel/sched/cpufreq_schedutil.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)

-- 
2.11.0

Comments

Peter Zijlstra May 24, 2017, 7:01 a.m. UTC | #1
On Wed, May 24, 2017 at 01:30:36AM +0200, Rafael J. Wysocki wrote:
> On Tuesday, May 23, 2017 09:29:27 PM Peter Zijlstra wrote:

> > On Tue, May 23, 2017 at 09:53:47AM +0100, Juri Lelli wrote:

> > > To be able to treat utilization signals of different scheduling classes

> > > in different ways (e.g., CFS signal might be stale while DEADLINE signal

> > > is never stale by design) we need to split sugov_cpu::util signal in two:

> > > util_cfs and util_dl.

> > > 

> > > This patch does that by also changing sugov_get_util() parameter list.

> > > After this change aggregation of the different signals has to be performed

> > > by sugov_get_util() users (so that they can decide what to do with the

> > > different signals).

> > 

> > So what I don't see this patch doing; and I don't remember if cpufreq is

> > ready for this at all, is set the util_dl as min/guaranteed freq and

> > util_cfs+util_dl as requested freq.

> 

> I'm totally unsure what you mean here.


I was thinking of the CPPC/HWP stuff, where you can set different
frequencies with different levels of guarantees.

We'd want to set util_dl as the minimum (guaranteed) performance, and
util_dl + util_cfs as the desired performance level.

> cpufreq doesn't have a "guaranteed frequency" concept of any sort right now.


I was afraid of that ;-) I think we want a comment in the code stating
that this is the desired goal though. Then once cpufreq is ready to deal
with it, we can change it.
Juri Lelli May 24, 2017, 9:01 a.m. UTC | #2
Hi,

On 24/05/17 09:01, Peter Zijlstra wrote:
> On Wed, May 24, 2017 at 01:30:36AM +0200, Rafael J. Wysocki wrote:

> > On Tuesday, May 23, 2017 09:29:27 PM Peter Zijlstra wrote:

> > > On Tue, May 23, 2017 at 09:53:47AM +0100, Juri Lelli wrote:

> > > > To be able to treat utilization signals of different scheduling classes

> > > > in different ways (e.g., CFS signal might be stale while DEADLINE signal

> > > > is never stale by design) we need to split sugov_cpu::util signal in two:

> > > > util_cfs and util_dl.

> > > > 

> > > > This patch does that by also changing sugov_get_util() parameter list.

> > > > After this change aggregation of the different signals has to be performed

> > > > by sugov_get_util() users (so that they can decide what to do with the

> > > > different signals).

> > > 

> > > So what I don't see this patch doing; and I don't remember if cpufreq is

> > > ready for this at all, is set the util_dl as min/guaranteed freq and

> > > util_cfs+util_dl as requested freq.

> > 

> > I'm totally unsure what you mean here.

> 

> I was thinking of the CPPC/HWP stuff, where you can set different

> frequencies with different levels of guarantees.

> 

> We'd want to set util_dl as the minimum (guaranteed) performance, and

> util_dl + util_cfs as the desired performance level.

> 

> > cpufreq doesn't have a "guaranteed frequency" concept of any sort right now.

> 

> I was afraid of that ;-) I think we want a comment in the code stating

> that this is the desired goal though. Then once cpufreq is ready to deal

> with it we can change it..


Sure, I can add that in next version.

Thanks,

- Juri
Juri Lelli May 24, 2017, 9:01 a.m. UTC | #3
Hi,

On 23/05/17 21:04, Peter Zijlstra wrote:
> On Tue, May 23, 2017 at 09:53:47AM +0100, Juri Lelli wrote:

> > @@ -157,14 +158,13 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,

> >  	return cpufreq_driver_resolve_freq(policy, freq);

> >  }

> >  

> > -static void sugov_get_util(unsigned long *util, unsigned long *max)

> > +static void sugov_get_util(struct sugov_cpu *sg_cpu)

> >  {

> >  	struct rq *rq = this_rq();

> > -	unsigned long dl_util = (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> 20;

> >  

> > -	*max = arch_scale_cpu_capacity(NULL, smp_processor_id());

> > -

> > -	*util = min(rq->cfs.avg.util_avg + dl_util, *max);

> > +	sg_cpu->max = arch_scale_cpu_capacity(NULL, smp_processor_id());

> > +	sg_cpu->util_cfs = rq->cfs.avg.util_avg;

> > +	sg_cpu->util_dl = (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> 20;

> >  }

> 

> Luca just introduced a nice BW_SHIFT for that '20' thing.


Right, will use that.

Thanks,

- Juri
diff mbox series

Patch

diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 1508109c7f19..f930cec4c3d4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -58,7 +58,8 @@  struct sugov_cpu {
 	u64 last_update;
 
 	/* The fields below are only needed when sharing a policy. */
-	unsigned long util;
+	unsigned long util_cfs;
+	unsigned long util_dl;
 	unsigned long max;
 	unsigned int flags;
 
@@ -157,14 +158,13 @@  static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
-static void sugov_get_util(unsigned long *util, unsigned long *max)
+static void sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = this_rq();
-	unsigned long dl_util = (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> 20;
 
-	*max = arch_scale_cpu_capacity(NULL, smp_processor_id());
-
-	*util = min(rq->cfs.avg.util_avg + dl_util, *max);
+	sg_cpu->max = arch_scale_cpu_capacity(NULL, smp_processor_id());
+	sg_cpu->util_cfs = rq->cfs.avg.util_avg;
+	sg_cpu->util_dl = (rq->dl.running_bw * SCHED_CAPACITY_SCALE) >> 20;
 }
 
 static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
@@ -231,7 +231,9 @@  static void sugov_update_single(struct update_util_data *hook, u64 time,
 	if (flags & SCHED_CPUFREQ_RT) {
 		next_f = policy->cpuinfo.max_freq;
 	} else {
-		sugov_get_util(&util, &max);
+		sugov_get_util(sg_cpu);
+		max = sg_cpu->max;
+		util = min(sg_cpu->util_cfs + sg_cpu->util_dl, max);
 		sugov_iowait_boost(sg_cpu, &util, &max);
 		next_f = get_next_freq(sg_policy, util, max);
 		/*
@@ -271,8 +273,8 @@  static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT)
 			return policy->cpuinfo.max_freq;
 
-		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
+		j_util = min(j_sg_cpu->util_cfs + j_sg_cpu->util_dl, j_max);
 		if (j_util * max > j_max * util) {
 			util = j_util;
 			max = j_max;
@@ -289,15 +291,12 @@  static void sugov_update_shared(struct update_util_data *hook, u64 time,
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
 	unsigned int next_f;
 
-	sugov_get_util(&util, &max);
 
 	raw_spin_lock(&sg_policy->update_lock);
 
-	sg_cpu->util = util;
-	sg_cpu->max = max;
+	sugov_get_util(sg_cpu);
 	sg_cpu->flags = flags;
 
 	sugov_set_iowait_boost(sg_cpu, time, flags);