2017-11-01 15:07:57 +01:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
2006-10-19 23:28:32 -07:00
|
|
|
#ifndef __INCLUDE_LINUX_OOM_H
|
|
|
|
|
#define __INCLUDE_LINUX_OOM_H
|
|
|
|
|
|
2007-10-16 23:25:53 -07:00
|
|
|
|
2017-02-08 18:51:30 +01:00
|
|
|
#include <linux/sched/signal.h>
|
2007-10-16 23:25:59 -07:00
|
|
|
#include <linux/types.h>
|
2009-12-15 16:45:33 -08:00
|
|
|
#include <linux/nodemask.h>
|
2012-10-13 10:46:48 +01:00
|
|
|
#include <uapi/linux/oom.h>
|
2017-08-18 15:16:15 -07:00
|
|
|
#include <linux/sched/coredump.h> /* MMF_* */
|
|
|
|
|
#include <linux/mm.h> /* VM_FAULT* */
|
2007-10-16 23:25:59 -07:00
|
|
|
|
|
|
|
|
struct zonelist;
|
|
|
|
|
struct notifier_block;
|
2010-08-09 17:19:43 -07:00
|
|
|
struct mem_cgroup;
|
|
|
|
|
struct task_struct;
|
2007-10-16 23:25:59 -07:00
|
|
|
|
2018-12-28 00:36:07 -08:00
|
|
|
/*
 * Kind of restriction that limited the allocation which invoked the OOM
 * killer; recorded in oom_control::constraint and used when printing the
 * OOM report (see "Used to print the constraint info" below).
 */
enum oom_constraint {
	CONSTRAINT_NONE,		/* no restriction (global OOM) */
	CONSTRAINT_CPUSET,		/* allocation confined by a cpuset */
	CONSTRAINT_MEMORY_POLICY,	/* allocation confined by a mempolicy nodemask */
	CONSTRAINT_MEMCG,		/* memcg limit hit (oc->memcg != NULL) */
};
|
|
|
|
|
|
2015-09-08 15:00:44 -07:00
|
|
|
/*
 * Details of the page allocation that triggered the oom killer that are used to
 * determine what should be killed.
 */
|
2015-09-08 15:00:36 -07:00
|
|
|
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Memory cgroup in which oom is invoked, or NULL for global oom */
	struct mem_cgroup *memcg;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;

	/* Used by oom implementation, do not set */
	unsigned long totalpages;	/* pages considered for badness scaling */
	struct task_struct *chosen;	/* victim selected by the oom killer */
	long chosen_points;		/* badness score of @chosen (see oom_badness()) */

	/* Used to print the constraint info. */
	enum oom_constraint constraint;
};
|
|
|
|
|
|
2015-06-24 16:57:19 -07:00
|
|
|
extern struct mutex oom_lock;
|
2020-10-13 16:58:35 -07:00
|
|
|
extern struct mutex oom_adj_mutex;
|
2015-06-24 16:57:19 -07:00
|
|
|
|
2012-12-11 16:02:56 -08:00
|
|
|
static inline void set_current_oom_origin(void)
|
|
|
|
|
{
|
2016-05-23 16:23:57 -07:00
|
|
|
current->signal->oom_flag_origin = true;
|
2012-12-11 16:02:56 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline void clear_current_oom_origin(void)
|
|
|
|
|
{
|
2016-05-23 16:23:57 -07:00
|
|
|
current->signal->oom_flag_origin = false;
|
2012-12-11 16:02:56 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static inline bool oom_task_origin(const struct task_struct *p)
|
|
|
|
|
{
|
2016-05-23 16:23:57 -07:00
|
|
|
return p->signal->oom_flag_origin;
|
2012-12-11 16:02:56 -08:00
|
|
|
}
|
2011-05-24 17:11:40 -07:00
|
|
|
|
2016-10-07 16:58:57 -07:00
|
|
|
static inline bool tsk_is_oom_victim(struct task_struct * tsk)
|
|
|
|
|
{
|
|
|
|
|
return tsk->signal->oom_mm;
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-14 15:33:15 -08:00
|
|
|
/*
 * Use this helper if tsk->mm != mm and the victim mm needs special
 * handling. Once set, this is guaranteed to stay true.
 */
|
|
|
|
|
static inline bool mm_is_oom_victim(struct mm_struct *mm)
|
|
|
|
|
{
|
|
|
|
|
return test_bit(MMF_OOM_VICTIM, &mm->flags);
|
|
|
|
|
}
|
|
|
|
|
|
2017-08-18 15:16:15 -07:00
|
|
|
/*
 * Checks whether a page fault on the given mm is still reliable.
 * This is no longer true if the oom reaper started to reap the
 * address space, which is reflected by the MMF_UNSTABLE flag set in
 * the mm. At that moment any !shared mapping would lose its content
 * and could cause memory corruption (zero pages instead of the
 * original content).
 *
 * Callers should invoke this before establishing a page table entry for
 * a !shared mapping, and under the proper page table lock.
 *
 * Return 0 when the PF is safe, VM_FAULT_SIGBUS otherwise.
 */
|
2018-08-23 17:01:36 -07:00
|
|
|
static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
|
2017-08-18 15:16:15 -07:00
|
|
|
{
|
|
|
|
|
if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
|
|
|
|
|
return VM_FAULT_SIGBUS;
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
2018-08-21 21:52:33 -07:00
|
|
|
bool __oom_reap_task_mm(struct mm_struct *mm);
|
2018-05-11 16:02:04 -07:00
|
|
|
|
2020-08-11 18:31:22 -07:00
|
|
|
long oom_badness(struct task_struct *p,
|
2012-05-29 15:06:47 -07:00
|
|
|
unsigned long totalpages);
|
2014-10-20 18:12:32 +02:00
|
|
|
|
2015-09-08 15:00:36 -07:00
|
|
|
extern bool out_of_memory(struct oom_control *oc);
|
2015-06-24 16:57:07 -07:00
|
|
|
|
2016-10-07 16:59:03 -07:00
|
|
|
extern void exit_oom_victim(void);
|
2015-06-24 16:57:07 -07:00
|
|
|
|
2007-10-16 23:25:53 -07:00
|
|
|
extern int register_oom_notifier(struct notifier_block *nb);
|
|
|
|
|
extern int unregister_oom_notifier(struct notifier_block *nb);
|
|
|
|
|
|
2016-10-07 16:59:00 -07:00
|
|
|
extern bool oom_killer_disable(signed long timeout);
|
2015-02-11 15:26:24 -08:00
|
|
|
extern void oom_killer_enable(void);
|
2010-08-09 17:18:56 -07:00
|
|
|
|
2010-08-10 18:03:00 -07:00
|
|
|
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
|
|
|
|
|
|
2010-08-09 17:18:56 -07:00
|
|
|
/* sysctls */
|
|
|
|
|
extern int sysctl_oom_dump_tasks;
|
|
|
|
|
extern int sysctl_oom_kill_allocating_task;
|
|
|
|
|
extern int sysctl_panic_on_oom;
|
2007-10-16 23:25:53 -07:00
|
|
|
#endif /* __INCLUDE_LINUX_OOM_H */
|