ANDROID: sched: consider stune boost margin when computing energy
If CONFIG_SCHED_TUNE is enabled, the energy computation does not use the
boosted CPU utilization, so it cannot reflect the real frequency when a
CPU has boosted tasks on it. Address this by adding the boost margin to
the utilization when the type is FREQUENCY_UTIL in schedutil_cpu_util().

Bug: 158637636
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I570920cb1e67d07de87006fca058d50e9358b7cd
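For context, here is a minimal sketch (not part of this patch) of how a SchedTune-style boost margin inflates a utilization signal. The percentage scaling mirrors schedtune_margin(), which in the kernel uses reciprocal_divide() rather than a plain division by 100:

/*
 * Illustrative only: a positive boost moves util "boost" percent of
 * the way toward full capacity; a negative boost shrinks it
 * proportionally.
 */
#define SCHED_CAPACITY_SCALE 1024

static long sketch_margin(unsigned long signal, long boost)
{
	if (boost >= 0)
		return (long)(SCHED_CAPACITY_SCALE - signal) * boost / 100;
	return (long)signal * boost / 100;
}

For example, with util = 512 and a 10% boost the margin is (1024 - 512) * 10 / 100 = 51, so the utilization used for frequency selection becomes 563.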
@@ -219,6 +219,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy,
 	return cpufreq_driver_resolve_freq(policy, freq);
 }
 
+extern long
+schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p);
+
 /*
  * This function computes an effective utilization for the given CPU, to be
  * used for frequency selection given the linear relation: f = u * f_max.
@@ -277,7 +280,11 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 	 */
 	util = util_cfs + cpu_util_rt(rq);
 	if (type == FREQUENCY_UTIL)
+#ifdef CONFIG_SCHED_TUNE
+		util += schedtune_cpu_margin_with(util, cpu, p);
+#else
 		util = uclamp_rq_util_with(rq, util, p);
+#endif
 
 	dl_util = cpu_util_dl(rq);
 
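The added margin raises the requested frequency through the linear relation noted in the comment above (f = u * f_max). A sketch of the mapping get_next_freq() applies, including schedutil's usual 25% headroom (illustrative, simplified from the real helper):

/*
 * freq = 1.25 * f_max * util / max; a boosted util therefore requests
 * a proportionally higher frequency before it is resolved against the
 * cpufreq table.
 */
static unsigned int sketch_next_freq(unsigned int max_freq,
				     unsigned long util, unsigned long max)
{
	unsigned int freq = max_freq + (max_freq >> 2); /* +25% headroom */

	return (unsigned int)((unsigned long long)freq * util / max);
}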
@@ -331,12 +338,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-#ifdef CONFIG_SCHED_TUNE
-	unsigned long util = stune_util(sg_cpu->cpu, cpu_util_rt(rq));
-#else
-	unsigned long util = cpu_util_freq(sg_cpu->cpu);
-#endif
-	unsigned long util_cfs = util - cpu_util_rt(rq);
+	unsigned long util_cfs = cpu_util_cfs(rq);
 	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
 
 	sg_cpu->max = max;
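With the margin applied inside schedutil_cpu_util(), sugov_get_util() no longer needs the stune_util() detour and can pass the plain CFS utilization down. A sketch of the resulting call; the parameters past those visible in the hunk header are assumed:

	/* Boosting now happens in one place, inside schedutil_cpu_util(),
	 * and only when FREQUENCY_UTIL is requested. */
	return schedutil_cpu_util(sg_cpu->cpu, util_cfs, max,
				  FREQUENCY_UTIL, NULL);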
@@ -5926,15 +5926,20 @@ schedtune_margin(unsigned long signal, long boost)
 	return margin;
 }
 
-static inline int
-schedtune_cpu_margin(unsigned long util, int cpu)
+inline long
+schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p)
 {
-	int boost = schedtune_cpu_boost(cpu);
+	int boost = schedtune_cpu_boost_with(cpu, p);
+	long margin;
 
 	if (boost == 0)
-		return 0;
+		margin = 0;
+	else
+		margin = schedtune_margin(util, boost);
 
-	return schedtune_margin(util, boost);
+	trace_sched_boost_cpu(cpu, util, margin);
+
+	return margin;
 }
 
 long schedtune_task_margin(struct task_struct *task)
@@ -5952,22 +5957,10 @@ long schedtune_task_margin(struct task_struct *task)
 	return margin;
 }
 
-unsigned long
-stune_util(int cpu, unsigned long other_util)
-{
-	unsigned long util = min_t(unsigned long, SCHED_CAPACITY_SCALE,
-				   cpu_util_cfs(cpu_rq(cpu)) + other_util);
-	long margin = schedtune_cpu_margin(util, cpu);
-
-	trace_sched_boost_cpu(cpu, util, margin);
-
-	return util + margin;
-}
-
 #else /* CONFIG_SCHED_TUNE */
 
-static inline int
-schedtune_cpu_margin(unsigned long util, int cpu)
+inline long
+schedtune_cpu_margin_with(unsigned long util, int cpu, struct task_struct *p)
 {
 	return 0;
 }
@@ -2436,13 +2436,6 @@ static inline unsigned long cpu_util_rt(struct rq *rq)
 	return READ_ONCE(rq->avg_rt.util_avg);
 }
 
-static inline unsigned long cpu_util_freq(int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	return min(cpu_util_cfs(rq) + cpu_util_rt(rq), capacity_orig_of(cpu));
-}
-
 #else /* CONFIG_CPU_FREQ_GOV_SCHEDUTIL */
 static inline unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
 				 unsigned long max, enum schedutil_type type,
@@ -460,10 +460,11 @@ void schedtune_dequeue_task(struct task_struct *p, int cpu)
 	raw_spin_unlock_irqrestore(&bg->lock, irq_flags);
 }
 
-int schedtune_cpu_boost(int cpu)
+int schedtune_cpu_boost_with(int cpu, struct task_struct *p)
 {
 	struct boost_groups *bg;
 	u64 now;
+	int task_boost = p ? schedtune_task_boost(p) : -100;
 
 	bg = &per_cpu(cpu_boost_groups, cpu);
 	now = sched_clock_cpu(cpu);
@@ -472,7 +473,7 @@ int schedtune_cpu_boost(int cpu)
 	if (schedtune_boost_timeout(now, bg->boost_ts))
 		schedtune_cpu_update(cpu, now);
 
-	return bg->boost_max;
+	return max(bg->boost_max, task_boost);
 }
 
 int schedtune_task_boost(struct task_struct *p)
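A quick illustration (values assumed) of the new aggregation: when no task is passed, task_boost defaults to -100, so the boost group's maximum always wins; a sufficiently boosted task can raise the result above it:

	/* bg->boost_max == 10, assumed for illustration */
	max(10, -100);	/* p == NULL: sentinel loses, returns 10      */
	max(10, 30);	/* boosted task (30%) wins, returns 30        */
	max(10, 5);	/* lightly boosted task, group still wins: 10 */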
@@ -12,7 +12,7 @@ struct target_nrg {
 	struct reciprocal_value rdiv;
 };
 
-int schedtune_cpu_boost(int cpu);
+int schedtune_cpu_boost_with(int cpu, struct task_struct *p);
 int schedtune_task_boost(struct task_struct *tsk);
 
 int schedtune_prefer_idle(struct task_struct *tsk);
@@ -20,11 +20,9 @@ int schedtune_prefer_idle(struct task_struct *tsk);
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);
 
-unsigned long stune_util(int cpu, unsigned long other_util);
-
 #else /* CONFIG_SCHED_TUNE */
 
-#define schedtune_cpu_boost(cpu) 0
+#define schedtune_cpu_boost_with(cpu, p) 0
 #define schedtune_task_boost(tsk) 0
 
 #define schedtune_prefer_idle(tsk) 0