
[v2,2/3] sched/fair: Enable interrupts when dropping lock in newidle_balance()

Message ID: 20210428232821.2506201-3-swood@redhat.com
State: New
Series: newidle_balance() PREEMPT_RT latency mitigations

Commit Message

Crystal Wood, April 28, 2021, 11:28 p.m. UTC
When combined with the next patch, which breaks out of rebalancing
when an RT task is runnable, this change yields significant latency
reductions on systems with many CPUs.

Signed-off-by: Scott Wood <swood@redhat.com>
---
 kernel/sched/fair.c | 8 ++++++++
 1 file changed, 8 insertions(+)
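
A minimal sketch of the locking pattern this patch applies, for readers
skimming the diff below. The newidle_balance_in_callback flag is presumably
introduced earlier in this series and records whether newidle balancing was
entered from a balance callback (i.e. with the rq lock held and interrupts
disabled); the function name and surrounding declarations here are
illustrative only, not the actual fair.c code.

/* Illustrative only; the real flag is defined elsewhere in the series. */
static bool newidle_balance_in_callback;

static void balance_section_with_irqs_enabled(struct rq *this_rq)
{
        /* Caller holds this_rq->lock with interrupts disabled. */
        raw_spin_unlock(&this_rq->lock);
        if (newidle_balance_in_callback)
                local_irq_enable();     /* let pending interrupts be serviced */

        /*
         * Potentially long-running balancing work runs here without the
         * rq lock held, so it no longer adds to IRQ-off latency.
         */

        if (newidle_balance_in_callback)
                local_irq_disable();    /* restore the caller's IRQ-off context */
        raw_spin_lock(&this_rq->lock);
}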

Patch

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff369c38a5b5..aa8c87b6aff8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10521,6 +10521,8 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	raw_spin_unlock(&this_rq->lock);
+	if (newidle_balance_in_callback)
+		local_irq_enable();
 	/*
 	 * This CPU is going to be idle and blocked load of idle CPUs
 	 * need to be updated. Run the ilb locally as it is a good
@@ -10529,6 +10531,8 @@ static void nohz_newidle_balance(struct rq *this_rq)
 	 */
 	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
 		kick_ilb(NOHZ_STATS_KICK);
+	if (newidle_balance_in_callback)
+		local_irq_disable();
 	raw_spin_lock(&this_rq->lock);
 }
 
@@ -10599,6 +10603,8 @@ static int do_newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	}
 
 	raw_spin_unlock(&this_rq->lock);
+	if (newidle_balance_in_callback)
+		local_irq_enable();
 
 	update_blocked_averages(this_cpu);
 	rcu_read_lock();
@@ -10636,6 +10642,8 @@ static int do_newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	}
 	rcu_read_unlock();
 
+	if (newidle_balance_in_callback)
+		local_irq_disable();
 	raw_spin_lock(&this_rq->lock);
 
 	if (curr_cost > this_rq->max_idle_balance_cost)