===================================================================
@@ -235,8 +235,7 @@ struct pci_bus;
 int x86_pci_root_bus_node(int bus);
 void x86_pci_root_bus_resources(int bus, struct list_head *resources);
-extern bool x86_topology_update;
 
 #ifdef CONFIG_SCHED_MC_PRIO
#include <asm/percpu.h>
@@ -284,9 +282,13 @@ static inline long arch_scale_freq_capac
extern void arch_set_max_freq_ratio(bool turbo_disabled);
extern void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled);
+
+void arch_rebuild_sched_domains(void);
#else
static inline void arch_set_max_freq_ratio(bool turbo_disabled) { }
static inline void freq_invariance_set_perf_ratio(u64 ratio, bool turbo_disabled) { }
+
+static inline void arch_rebuild_sched_domains(void) { }
#endif
extern void arch_scale_freq_tick(void);
===================================================================
@@ -54,10 +54,8 @@ static int sched_itmt_update_handler(str
old_sysctl = sysctl_sched_itmt_enabled;
ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled) {
- x86_topology_update = true;
- rebuild_sched_domains();
- }
+ if (!ret && write && old_sysctl != sysctl_sched_itmt_enabled)
+ arch_rebuild_sched_domains();
mutex_unlock(&itmt_update_mutex);
@@ -114,8 +112,7 @@ int sched_set_itmt_support(void)
sysctl_sched_itmt_enabled = 1;
- x86_topology_update = true;
- rebuild_sched_domains();
+ arch_rebuild_sched_domains();
mutex_unlock(&itmt_update_mutex);
@@ -150,8 +147,7 @@ void sched_clear_itmt_support(void)
if (sysctl_sched_itmt_enabled) {
/* disable sched_itmt if we are no longer ITMT capable */
sysctl_sched_itmt_enabled = 0;
- x86_topology_update = true;
- rebuild_sched_domains();
+ arch_rebuild_sched_domains();
}
mutex_unlock(&itmt_update_mutex);
===================================================================
@@ -39,6 +39,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/cpuset.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
@@ -125,7 +126,7 @@ static DEFINE_PER_CPU_ALIGNED(struct mwa
int __read_mostly __max_smt_threads = 1;
/* Flag to indicate if a complete sched domain rebuild is required */
-bool x86_topology_update;
+static bool x86_topology_update;
int arch_update_cpu_topology(void)
{
@@ -135,6 +136,14 @@ int arch_update_cpu_topology(void)
 	return retval;
 }
+#ifdef CONFIG_X86_64
+void arch_rebuild_sched_domains(void)
+{
+	x86_topology_update = true;
+	rebuild_sched_domains();
+}
+#endif
+
static unsigned int smpboot_warm_reset_vector_count;
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)