sched: avoid migrating when softint on tgt cpu should be short
The scheduling change (bug 31501544) to avoid putting RT threads on
cores that are handling softints was catching cases where there was no
reason to believe the softint would take a long time, resulting in
unnecessary migration overhead. This patch restricts migration to cases
where the core has a softint that is actually likely to take a long
time, as opposed to the RCU, SCHED, and TIMER softints, which are
rather quick.

Bug: 31752786
Change-Id: Ib4e179f1e15c736b2fdba31070494e357e9fbbe2
Git-commit: ce05770bd37b8065b61ef650108ecef2b97b148b
Git-repo: https://android.googlesource.com/kernel/msm
[pkondeti@codeaurora.org: resolved minor merge conflicts]
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Satya Durga Srinivasu Prabhala <satyap@codeaurora.org>
commit 615d2fba03 (parent 54869f74a2)
committed by Satya Durga Srinivasu Prabhala
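To illustrate the distinction the patch draws, here is a minimal
userspace sketch of which softirqs the new LONG_SOFTIRQ_MASK treats as
long. The enum mirrors the kernel's softirq numbering (see
softirq_to_name[] in the diff below); is_long_softirq() is a
hypothetical helper written for this sketch, not part of the patch:

#include <stdio.h>

/* Mirrors the kernel's softirq numbering (see softirq_to_name[]). */
enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ,
       BLOCK_SOFTIRQ, IRQ_POLL_SOFTIRQ, TASKLET_SOFTIRQ,
       SCHED_SOFTIRQ, HRTIMER_SOFTIRQ, RCU_SOFTIRQ, NR_SOFTIRQS };

#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
			   (1 << NET_RX_SOFTIRQ) | \
			   (1 << BLOCK_SOFTIRQ) | \
			   (1 << IRQ_POLL_SOFTIRQ) | \
			   (1 << TASKLET_SOFTIRQ))

/* Hypothetical helper: does this softirq count as "long"? */
static int is_long_softirq(int nr)
{
	return !!(LONG_SOFTIRQ_MASK & (1 << nr));
}

int main(void)
{
	const char *name[] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
			       "IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU" };
	for (int nr = 0; nr < NR_SOFTIRQS; nr++)
		printf("%-8s %s\n", name[nr],
		       is_long_softirq(nr) ? "long" : "short");
	return 0;
}

Running this prints "short" for exactly the RCU, SCHED, TIMER, HI, and
HRTIMER softirqs named (or implied) in the commit message above.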
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -478,6 +478,12 @@ enum
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+			   (1 << NET_RX_SOFTIRQ) | \
+			   (1 << BLOCK_SOFTIRQ) | \
+			   (1 << IRQ_POLL_SOFTIRQ) | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -513,6 +519,7 @@ extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1467,16 +1467,20 @@ static int find_lowest_rq(struct task_struct *task);
 
 /*
  * Return whether the task on the given cpu is currently non-preemptible
- * while handling a softirq or is likely to block preemptions soon because
- * it is a ksoftirq thread.
+ * while handling a potentially long softint, or if the task is likely
+ * to block preemptions soon because it is a ksoftirq thread that is
+ * handling slow softints.
  */
 bool
 task_may_not_preempt(struct task_struct *task, int cpu)
 {
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
 	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
 
-	return (task_thread_info(task)->preempt_count & SOFTIRQ_MASK) ||
-	       task == cpu_ksoftirqd;
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
 }
 
 static int
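The effect of the rewritten predicate can be checked in isolation with
a small sketch. The names here (may_not_preempt, is_ksoftirqd,
in_softirq) are illustrative stand-ins for the per-CPU state the kernel
code reads, not kernel API; the mask is trimmed to one bit for brevity:

#include <stdbool.h>
#include <stdint.h>
#include <assert.h>

enum { TIMER_SOFTIRQ = 1, NET_RX_SOFTIRQ = 3 };   /* subset, kernel order */
#define LONG_SOFTIRQ_MASK (1u << NET_RX_SOFTIRQ)  /* trimmed for the sketch */

/* Sketch of the new predicate: softirq activity on the CPU only blocks
 * RT placement when a "long" softirq is involved AND the current task
 * is ksoftirqd or is itself inside softirq handling. */
static bool may_not_preempt(uint32_t softirqs, bool is_ksoftirqd,
			    bool in_softirq)
{
	return (softirqs & LONG_SOFTIRQ_MASK) && (is_ksoftirqd || in_softirq);
}

int main(void)
{
	/* A pending TIMER softirq no longer forces migration... */
	assert(!may_not_preempt(1u << TIMER_SOFTIRQ, true, false));
	/* ...but a pending NET_RX softirq handled by ksoftirqd still does. */
	assert(may_not_preempt(1u << NET_RX_SOFTIRQ, true, false));
	return 0;
}

Under the old predicate the first case would also have returned true,
which is exactly the unnecessary migration the commit message describes.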
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -57,6 +57,13 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
+
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
 	"TASKLET", "SCHED", "HRTIMER", "RCU"
@@ -272,6 +279,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 restart:
 	/* Reset the pending bitmask before enabling irqs */
 	set_softirq_pending(0);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -301,6 +309,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	rcu_bh_qs();
 	local_irq_disable();
 
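The pattern above — publish the whole pending mask before running any
handler, clear it after the loop — trades precision for zero
synchronization: a reader on another CPU may see a slightly stale mask,
which is acceptable for a placement heuristic, as the comment on
active_softirqs notes. A minimal sketch of the same publish/clear
pattern, with illustrative names rather than kernel API:

#include <stdint.h>

static uint32_t active_softirqs;  /* per-CPU in the kernel; a plain global here */

/* Sketch of the __do_softirq() bookkeeping added by this patch. */
static void do_softirq_sketch(uint32_t pending, void (*handle)(int nr))
{
	/* Publish the full mask before running any handler, so a remote
	 * observer sees every softirq this pass will touch. */
	active_softirqs = pending;

	while (pending) {
		int nr = __builtin_ctz(pending);  /* lowest pending bit (GCC/Clang builtin) */
		handle(nr);
		pending &= pending - 1;           /* clear that bit */
	}

	/* All handlers done; clear so observers stop avoiding this CPU. */
	active_softirqs = 0;
}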