@@ -120,6 +120,12 @@ void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
+#ifdef CONFIG_RCU_NOCB_CPU
+int rcu_nocb_enabled(struct cpumask *out_mask);
+#else
+static inline int rcu_nocb_enabled(struct cpumask *out_mask) { return 0; }
+#endif
+
#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
#else
@@ -81,6 +81,18 @@ static int __init parse_rcu_nocb_poll(char *arg)
}
__setup("rcu_nocb_poll", parse_rcu_nocb_poll);
+/*
+ * Return the rcu_nocb state & optionally copy out rcu_nocb_mask.
+ */
+int rcu_nocb_enabled(struct cpumask *out_mask)
+{
+ if (!rcu_state.nocb_is_setup)
+ return 0;
+ if (out_mask)
+ cpumask_copy(out_mask, rcu_nocb_mask);
+ return 1;
+}
+
/*
* Don't bother bypassing ->cblist if the call_rcu() rate is low.
* After all, the main point of bypassing is to avoid lock contention
Add a new rcu_nocb_enabled() helper to expose the rcu_nocb state to other
kernel subsystems like cpuset. That will allow cpuset to determine if RCU
no-callback can be enabled on isolated CPUs within isolated partitions.
If so, the corresponding RCU functions can be called to enable it when
full CPU isolation is requested.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 include/linux/rcupdate.h |  6 ++++++
 kernel/rcu/tree_nocb.h   | 12 ++++++++++++
 2 files changed, 18 insertions(+)