Currently __set_oom_adj loops through all processes in the system to
keep oom_score_adj and oom_score_adj_min in sync between processes
sharing their mm. This is done for any task with more than one mm_user,
which includes processes with multiple threads (sharing mm and signals).
However for such processes the loop is unnecessary because their signal
structure is shared as well.
Android updates oom_score_adj whenever a task changes its role
(background/foreground/...) or binds to/unbinds from a service, making
it more/less important. Such operation can happen frequently.
We noticed that updates to oom_score_adj became more expensive and after
further investigation found out that the patch mentioned in "Fixes"
introduced a regression. Using Pixel 4 with a typical Android workload,
write time to oom_score_adj increased from ~3.57us to ~362us. Moreover
this regression linearly depends on the number of multi-threaded
processes running on the system.
Mark the mm with a new MMF_MULTIPROCESS flag bit when a task is created with
(CLONE_VM && !CLONE_THREAD && !CLONE_VFORK). Change __set_oom_adj to use
MMF_MULTIPROCESS instead of mm_users to decide whether oom_score_adj
update should be synchronized between multiple processes. To prevent
races between clone() and __set_oom_adj(), when oom_score_adj of the
process being cloned might be modified from userspace, we use
oom_adj_mutex. Its scope is changed to global. The combination of
(CLONE_VM && !CLONE_THREAD) is rarely used except for the case of vfork().
To prevent performance regressions of vfork(), we skip taking oom_adj_mutex
and setting MMF_MULTIPROCESS when CLONE_VFORK is specified. Clearing the
MMF_MULTIPROCESS flag (when the last process sharing the mm exits) is left
out of this patch to keep it simple and because it is believed that this
threading model is rare. Should there ever be a need for optimizing that
case as well, it can be done by hooking into the exit path, likely
following the mm_update_next_owner pattern.
With the combination of (CLONE_VM && !CLONE_THREAD && !CLONE_VFORK) being
quite rare, the regression is gone after the change is applied.
Fixes: 44a70adec9 ("mm, oom_adj: make sure processes sharing mm have same view of oom_score_adj")
Reported-by: Tim Murray <timmurray@google.com>
Debugged-by: Minchan Kim <minchan@kernel.org>
Suggested-by: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Christian Brauner <christian.brauner@ubuntu.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Change-Id: Ibc07d897eea94b0314d270832f678030f5a2095a
[charante@codeaurora.org: fixed trivial merge conflicts]
Git-Commit: 62b5255b46be83d1815b0043a024ab89a7f01907
Git-Commit: 02b2474b975733fbdb45462c35269639ed889290
Git-Repo: https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Signed-off-by: Charan Teja Reddy <charante@codeaurora.org>
82 lines
2.7 KiB
C
82 lines
2.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_COREDUMP_H
#define _LINUX_SCHED_COREDUMP_H

#include <linux/mm_types.h>

#define SUID_DUMP_DISABLE	0	/* No setuid dumping */
#define SUID_DUMP_USER		1	/* Dump as user of process */
#define SUID_DUMP_ROOT		2	/* Dump as root */

/* mm flags */

/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
	return mm_flags & MMF_DUMPABLE_MASK;
}

static inline int get_dumpable(struct mm_struct *mm)
{
	return __get_dumpable(mm->flags);
}

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED	8
#define MMF_DUMP_DAX_PRIVATE	9
#define MMF_DUMP_DAX_SHARED	10

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	9
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
/*
 * MMF_DUMP_MASK_DEFAULT_ELF is referenced here but defined below; macro
 * expansion happens at the point of use, so the ordering is harmless.
 */
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */
/*
 * This one-shot flag is dropped due to necessity of changing exe once again
 * on NFS restore
 */
//#define MMF_EXE_FILE_CHANGED	18	/* see prctl_set_mm_exe_file() */

#define MMF_HAS_UPROBES		19	/* has uprobes */
#define MMF_RECALC_UPROBES	20	/* MMF_HAS_UPROBES can be wrong */
#define MMF_OOM_SKIP		21	/* mm is of no interest for the OOM killer */
#define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
#define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS	27	/* mm is shared between processes */
#define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)

/*
 * Flags propagated to a new mm at fork; note MMF_MULTIPROCESS is
 * deliberately not part of this mask — it is set explicitly in the
 * clone path per the commit message above.
 */
#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
				 MMF_DISABLE_THP_MASK)

#endif /* _LINUX_SCHED_COREDUMP_H */