diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -44,6 +44,14 @@ enum sched_tunable_scaling {
};
extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
+#ifdef CONFIG_SCHED_PACKING_TASKS
+extern int __read_mostly sysctl_sched_packing_level;
+
+int sched_proc_update_packing(struct ctl_table *table, int write,
+			      void __user *buffer, size_t *lenp,
+			      loff_t *ppos);
+#endif
+
extern unsigned int sysctl_numa_balancing_scan_delay;
extern unsigned int sysctl_numa_balancing_scan_period_min;
extern unsigned int sysctl_numa_balancing_scan_period_max;
@@ -61,6 +69,7 @@ extern unsigned int sysctl_sched_shares_window;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
loff_t *ppos);
+
#endif
#ifdef CONFIG_SCHED_DEBUG
static inline unsigned int get_sysctl_timer_migration(void)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -186,6 +186,32 @@ void sched_init_granularity(void)
*/
DEFINE_PER_CPU(int, sd_pack_buddy);
+/*
+ * The packing level of the scheduler
+ *
+ * This level defines the activity % above which another CPU should be
+ * added to participate in the packing effort of the tasks.
+ */
+#define DEFAULT_PACKING_LEVEL 80
+int __read_mostly sysctl_sched_packing_level = DEFAULT_PACKING_LEVEL;
+
+unsigned int sd_pack_threshold = (100 * 1024) / DEFAULT_PACKING_LEVEL;
+
+int sched_proc_update_packing(struct ctl_table *table, int write,
+			      void __user *buffer, size_t *lenp,
+			      loff_t *ppos)
+{
+	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	if (sysctl_sched_packing_level)
+		sd_pack_threshold = (100 * 1024) / sysctl_sched_packing_level;
+
+	return 0;
+}
+
static inline bool is_packing_cpu(int cpu)
{
int my_buddy = per_cpu(sd_pack_buddy, cpu);
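
For reference on the arithmetic: sysctl_sched_packing_level is a plain percentage, and sd_pack_threshold rescales it onto the 1024-based fixed-point range used by the load-tracking statistics, so the activity test can be a multiply-and-compare instead of a division. The consumer of sd_pack_threshold is not part of this patch; the stand-alone sketch below only illustrates the intended comparison, assuming a CPU's activity is tracked as a (sum, period) pair -- the helper names and the exact check are illustrative, not taken from the series.

/*
 * Stand-alone sketch of the sd_pack_threshold arithmetic (not kernel
 * code).  Assumption: a CPU's activity is tracked as a (sum, period)
 * pair in the same unit, and the CPU is "active enough" to justify
 * adding another CPU to the packing effort when
 *     sum / period > sysctl_sched_packing_level / 100
 * Pre-computing threshold = (100 * 1024) / level turns that test into
 * a multiply-and-compare.
 */
#include <stdio.h>

#define DEFAULT_PACKING_LEVEL 80

static int sysctl_sched_packing_level = DEFAULT_PACKING_LEVEL;
static unsigned int sd_pack_threshold = (100 * 1024) / DEFAULT_PACKING_LEVEL;

/* Mirrors what sched_proc_update_packing() does on a successful write. */
static void set_packing_level(int level)
{
	sysctl_sched_packing_level = level;
	if (level)
		sd_pack_threshold = (100 * 1024) / level;
}

/*
 * sum * sd_pack_threshold > period * 1024
 *   <=>  sum / period > sysctl_sched_packing_level / 100
 */
static int above_packing_level(unsigned long sum, unsigned long period)
{
	return sum * sd_pack_threshold > period * 1024;
}

int main(void)
{
	set_packing_level(80);
	/* 85% activity over a 1000-unit window: above an 80% level -> 1 */
	printf("85%% busy: %d\n", above_packing_level(850, 1000));
	/* 75% activity: below the level, keep packing on fewer CPUs -> 0 */
	printf("75%% busy: %d\n", above_packing_level(750, 1000));
	return 0;
}

Under that reading, raising sched_packing_level lets each buddy CPU get busier before the packing effort list is widened, while lowering it brings additional CPUs in sooner.
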
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -255,11 +255,17 @@ static struct ctl_table sysctl_base_table[] = {
{ }
};
+#ifdef CONFIG_SCHED_PACKING_TASKS
+static int min_sched_packing_level;
+static int max_sched_packing_level = 100;
+#endif /* CONFIG_SCHED_PACKING_TASKS */
+
#ifdef CONFIG_SCHED_DEBUG
static int min_sched_granularity_ns = 100000; /* 100 usecs */
static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
static int min_wakeup_granularity_ns; /* 0 usecs */
static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+
#ifdef CONFIG_SMP
static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE;
static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1;
@@ -279,6 +285,17 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_PACKING_TASKS
+	{
+		.procname	= "sched_packing_level",
+		.data		= &sysctl_sched_packing_level,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= sched_proc_update_packing,
+		.extra1		= &min_sched_packing_level,
+		.extra2		= &max_sched_packing_level,
+	},
+#endif
#ifdef CONFIG_SCHED_DEBUG
{
.procname = "sched_min_granularity_ns",
The knob is used to set an average load threshold that will be used to
trigger the inclusion/removal of CPUs in the packing effort list.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
---
 include/linux/sched/sysctl.h |  9 +++++++++
 kernel/sched/fair.c          | 26 ++++++++++++++++++++++++++
 kernel/sysctl.c              | 17 +++++++++++++++++
 3 files changed, 52 insertions(+)
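
Usage note: the entry is added to kern_table, so with CONFIG_SCHED_PACKING_TASKS enabled the knob should show up as /proc/sys/kernel/sched_packing_level; writes outside 0..100 are rejected by proc_dointvec_minmax() through extra1/extra2, and a value of 0 is accepted but leaves sd_pack_threshold untouched. A minimal user-space sketch of reading and updating it (the path is inferred from the kern_table registration, not spelled out in the patch):

/*
 * Sketch: query and update the packing-level knob from user space.
 * Assumes CONFIG_SCHED_PACKING_TASKS=y and that the sysctl is exposed
 * under /proc/sys/kernel/ as registered in kern_table above.
 */
#include <stdio.h>

#define KNOB "/proc/sys/kernel/sched_packing_level"

int main(void)
{
	FILE *f;
	int level;

	f = fopen(KNOB, "r");
	if (!f) {
		perror(KNOB);
		return 1;
	}
	if (fscanf(f, "%d", &level) == 1)
		printf("current packing level: %d%%\n", level);
	fclose(f);

	/* Raise the level: CPUs join the packing effort list later. */
	f = fopen(KNOB, "w");
	if (!f) {
		perror(KNOB);
		return 1;
	}
	fprintf(f, "%d\n", 90);
	fclose(f);
	return 0;
}

The same can of course be done from a shell with cat and echo on that path.
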