FROMLIST: sched: Introduce a sysctl for Energy Aware Scheduling

In its current state, Energy Aware Scheduling (EAS) starts automatically
on asymmetric platforms that have an Energy Model (EM). However, there
are users who want to have an EM (for thermal management, for example)
but don't want EAS with it.

In order to let users disable EAS explicitly, introduce a new sysctl
called 'sched_energy_aware'. It is enabled by default so that EAS can
start automatically on platforms where it makes sense. Flipping it to 0
rebuilds the scheduling domains and disables EAS.
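
As an illustration only (not part of this patch), here is a minimal
userspace sketch of how the knob can be toggled, assuming the kern_table
entry below exposes it as /proc/sys/kernel/sched_energy_aware (kern_table
entries live under /proc/sys/kernel/). Writing 0 makes the handler rebuild
the scheduling domains with EAS disabled; writing 1 re-enables it. Needs
CAP_SYS_ADMIN:

  /* Hypothetical example, not shipped with this patch. */
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          /* Path follows from the "sched_energy_aware" kern_table entry. */
          int fd = open("/proc/sys/kernel/sched_energy_aware", O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /* "0" disables EAS; the proc handler rebuilds the sched domains. */
          if (write(fd, "0\n", 2) != 2) {
                  perror("write");
                  close(fd);
                  return 1;
          }
          close(fd);
          return 0;
  }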

Change-Id: I55764e70bf5e90795d2269ec9135ae6e82794a2b
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
Message-Id: <20181016101513.26919-11-quentin.perret@arm.com>

--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -83,4 +83,11 @@ extern int sysctl_schedstats(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp,
 				 loff_t *ppos);
 
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+extern unsigned int sysctl_sched_energy_aware;
+extern int sched_energy_aware_handler(struct ctl_table *table, int write,
+				 void __user *buffer, size_t *lenp,
+				 loff_t *ppos);
+#endif
+
 #endif /* _LINUX_SCHED_SYSCTL_H */

--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -203,9 +203,35 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 
 DEFINE_STATIC_KEY_FALSE(sched_energy_present);
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+unsigned int sysctl_sched_energy_aware = 1;
 DEFINE_MUTEX(sched_energy_mutex);
 bool sched_energy_update;
 
+#ifdef CONFIG_PROC_SYSCTL
+int sched_energy_aware_handler(struct ctl_table *table, int write,
+			       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret, state;
+
+	if (write && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (!ret && write) {
+		state = static_branch_unlikely(&sched_energy_present);
+		if (state != sysctl_sched_energy_aware) {
+			mutex_lock(&sched_energy_mutex);
+			sched_energy_update = 1;
+			rebuild_sched_domains();
+			sched_energy_update = 0;
+			mutex_unlock(&sched_energy_mutex);
+		}
+	}
+
+	return ret;
+}
+#endif
+
 static void free_pd(struct perf_domain *pd)
 {
 	struct perf_domain *tmp;
@@ -338,6 +364,9 @@ static void build_perf_domains(const struct cpumask *cpu_map)
 	struct cpufreq_policy *policy;
 	struct cpufreq_governor *gov;
 
+	if (!sysctl_sched_energy_aware)
+		goto free;
+
 	/* EAS is enabled for asymmetric CPU capacity topologies. */
 	if (!per_cpu(sd_asym_cpucapacity, cpu)) {
 		if (sched_debug()) {

--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -466,6 +466,17 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &one,
 	},
 #endif
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+	{
+		.procname	= "sched_energy_aware",
+		.data		= &sysctl_sched_energy_aware,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_energy_aware_handler,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+#endif
 #ifdef CONFIG_PROVE_LOCKING
 	{
 		.procname	= "prove_locking",