Message ID | 1533657387-29039-4-git-send-email-vincent.guittot@linaro.org |
---|---|
State | Superseded |
Headers | show |
Series | sched/fair: some fixes for asym_packing | expand |
On Tue, Aug 07, 2018 at 05:56:27PM +0200, Vincent Guittot wrote: > +static inline bool > +asym_active_balance(enum cpu_idle_type idle, unsigned int flags, int dst, int src) > { > + if (idle != CPU_NOT_IDLE) { > > /* > * ASYM_PACKING needs to force migrate tasks from busy but > * lower priority CPUs in order to pack all tasks in the > * highest priority CPUs. > */ > + if ((flags & SD_ASYM_PACKING) && > + sched_asym_prefer(dst, src)) > + return true; > } > > + return false; > +} > + > +static int need_active_balance(struct lb_env *env) > +{ > + struct sched_domain *sd = env->sd; > + > + > + if (asym_active_balance(env->idle, sd->flags, env->dst_cpu, env->src_cpu)) > + return 1; > + > /* > * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. > * It's worth migrating the task if the src_cpu's capacity is reduced > @@ -8650,7 +8660,8 @@ static int load_balance(int this_cpu, struct rq *this_rq, > } else > sd->nr_balance_failed = 0; > > + if (likely(!active_balance) || > + asym_active_balance(env.idle, sd->flags, env.dst_cpu, env.src_cpu)) { Perhaps like the below? --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8857,21 +8857,24 @@ static struct rq *find_busiest_queue(str */ #define MAX_PINNED_INTERVAL 512 +static inline bool +asym_active_balance(struct lb_env *env) +{ + /* + * ASYM_PACKING needs to force migrate tasks from busy but + * lower priority CPUs in order to pack all tasks in the + * highest priority CPUs. + */ + return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && + sched_asym_prefer(env->dst_cpu, env->src_cpu); +} + static int need_active_balance(struct lb_env *env) { struct sched_domain *sd = env->sd; - if (env->idle != CPU_NOT_IDLE) { - - /* - * ASYM_PACKING needs to force migrate tasks from busy but - * lower priority CPUs in order to pack all tasks in the - * highest priority CPUs. 
- */ - if ((sd->flags & SD_ASYM_PACKING) && - sched_asym_prefer(env->dst_cpu, env->src_cpu)) - return 1; - } + if (asym_active_balance(env)) + return 1; /* * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. @@ -9150,7 +9153,7 @@ static int load_balance(int this_cpu, st } else sd->nr_balance_failed = 0; - if (likely(!active_balance)) { + if (likely(!active_balance) || asym_active_balance(&env)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else {
On Thu, 13 Dec 2018 at 14:52, Peter Zijlstra <peterz@infradead.org> wrote: > > On Tue, Aug 07, 2018 at 05:56:27PM +0200, Vincent Guittot wrote: > > +static inline bool > > +asym_active_balance(enum cpu_idle_type idle, unsigned int flags, int dst, int src) > > { > > + if (idle != CPU_NOT_IDLE) { > > > > /* > > * ASYM_PACKING needs to force migrate tasks from busy but > > * lower priority CPUs in order to pack all tasks in the > > * highest priority CPUs. > > */ > > + if ((flags & SD_ASYM_PACKING) && > > + sched_asym_prefer(dst, src)) > > + return true; > > } > > > > + return false; > > +} > > + > > +static int need_active_balance(struct lb_env *env) > > +{ > > + struct sched_domain *sd = env->sd; > > + > > + > > + if (asym_active_balance(env->idle, sd->flags, env->dst_cpu, env->src_cpu)) > > + return 1; > > + > > /* > > * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. > > * It's worth migrating the task if the src_cpu's capacity is reduced > > @@ -8650,7 +8660,8 @@ static int load_balance(int this_cpu, struct rq *this_rq, > > } else > > sd->nr_balance_failed = 0; > > > > + if (likely(!active_balance) || > > + asym_active_balance(env.idle, sd->flags, env.dst_cpu, env.src_cpu)) { > > Perhaps like the below? Yes. Far more simple and readable > > --- a/kernel/sched/fair.c > +++ b/kernel/sched/fair.c > @@ -8857,21 +8857,24 @@ static struct rq *find_busiest_queue(str > */ > #define MAX_PINNED_INTERVAL 512 > > +static inline bool > +asym_active_balance(struct lb_env *env) > +{ > + /* > + * ASYM_PACKING needs to force migrate tasks from busy but > + * lower priority CPUs in order to pack all tasks in the > + * highest priority CPUs. 
> + */ > + return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) && > + sched_asym_prefer(env->dst_cpu, env->src_cpu); > +} > + > static int need_active_balance(struct lb_env *env) > { > struct sched_domain *sd = env->sd; > > - if (env->idle != CPU_NOT_IDLE) { > - > - /* > - * ASYM_PACKING needs to force migrate tasks from busy but > - * lower priority CPUs in order to pack all tasks in the > - * highest priority CPUs. > - */ > - if ((sd->flags & SD_ASYM_PACKING) && > - sched_asym_prefer(env->dst_cpu, env->src_cpu)) > - return 1; > - } > + if (asym_active_balance(env)) > + return 1; > > /* > * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. > @@ -9150,7 +9153,7 @@ static int load_balance(int this_cpu, st > } else > sd->nr_balance_failed = 0; > > - if (likely(!active_balance)) { > + if (likely(!active_balance) || asym_active_balance(&env)) { > /* We were unbalanced, so reset the balancing interval */ > sd->balance_interval = sd->min_interval; > } else {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 5f1b6c6..ceb6bed 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8360,22 +8360,32 @@ static struct rq *find_busiest_queue(struct lb_env *env, */ #define MAX_PINNED_INTERVAL 512 -static int need_active_balance(struct lb_env *env) +static inline bool +asym_active_balance(enum cpu_idle_type idle, unsigned int flags, int dst, int src) { - struct sched_domain *sd = env->sd; - - if (env->idle != CPU_NOT_IDLE) { + if (idle != CPU_NOT_IDLE) { /* * ASYM_PACKING needs to force migrate tasks from busy but * lower priority CPUs in order to pack all tasks in the * highest priority CPUs. */ - if ((sd->flags & SD_ASYM_PACKING) && - sched_asym_prefer(env->dst_cpu, env->src_cpu)) - return 1; + if ((flags & SD_ASYM_PACKING) && + sched_asym_prefer(dst, src)) + return true; } + return false; +} + +static int need_active_balance(struct lb_env *env) +{ + struct sched_domain *sd = env->sd; + + + if (asym_active_balance(env->idle, sd->flags, env->dst_cpu, env->src_cpu)) + return 1; + /* * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task. * It's worth migrating the task if the src_cpu's capacity is reduced @@ -8650,7 +8660,8 @@ static int load_balance(int this_cpu, struct rq *this_rq, } else sd->nr_balance_failed = 0; - if (likely(!active_balance)) { + if (likely(!active_balance) || + asym_active_balance(env.idle, sd->flags, env.dst_cpu, env.src_cpu)) { /* We were unbalanced, so reset the balancing interval */ sd->balance_interval = sd->min_interval; } else {
In case of active balance, we increase the balance interval to cover pinned tasks cases not covered by all_pinned logic. Nevertheless, the active migration triggered by asym packing should be treated as the normal unbalanced case and reset the interval to the default value, otherwise active migration for asym_packing can be easily delayed for hundreds of ms because of this all_pinned detection mechanism. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> --- kernel/sched/fair.c | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) -- 2.7.4