// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/oom_kill.c
 *
 *  Copyright (C)  1998,2000  Rik van Riel
 *	Thanks go out to Claus Fischer for some serious inspiration and
 *	for goading me into coding this file...
 *  Copyright (C)  2010  Google, Inc.
 *	Rewritten by David Rientjes
 *
 *  The routines in this file are used to kill a process when
 *  we're seriously out of memory. This gets called from __alloc_pages()
 *  in mm/page_alloc.c when we really run out of memory.
 *
 *  Since we won't call these routines often (on a well-configured
 *  machine) this file will double as a 'coding guide' and a signpost
 *  for newbie kernel hackers. It features several pointers to major
 *  kernel subsystems and hints as to where to find out what things do.
 */
#include <linux/oom.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/swap.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
#include <linux/mempolicy.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/freezer.h>
#include <linux/ftrace.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/mmu_notifier.h>

#include <asm/tlb.h>
#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
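/*
 * Note: these three knobs are exposed to userspace as
 * /proc/sys/vm/panic_on_oom, /proc/sys/vm/oom_kill_allocating_task and
 * /proc/sys/vm/oom_dump_tasks (wired up in kernel/sysctl.c), e.g.:
 *
 *	echo 1 > /proc/sys/vm/oom_kill_allocating_task
 */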
/*
 * Serializes oom killer invocations (out_of_memory()) from all contexts to
 * prevent overly eager oom killing (e.g. when the oom killer is invoked
 * from different domains).
 *
 * oom_killer_disable() relies on this lock to stabilize oom_killer_disabled
 * internal state.
 */
DEFINE_MUTEX(oom_lock);
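/*
 * Illustrative sketch of the expected caller pattern (not a real call
 * site in this file): a context that wants to invoke the oom killer
 * takes oom_lock with a trylock and backs off if it is contended,
 * because the current holder is already handling the OOM situation.
 * See pagefault_out_of_memory() at the bottom of this file:
 *
 *	if (mutex_trylock(&oom_lock)) {
 *		out_of_memory(&oc);
 *		mutex_unlock(&oom_lock);
 *	}
 */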
#ifdef CONFIG_NUMA
/**
 * has_intersects_mems_allowed() - check task eligibility for kill
 * @start: task struct of which task to consider
 * @mask: nodemask passed to page allocator for mempolicy ooms
 *
 * Task eligibility is determined by whether or not a candidate task, @tsk,
 * shares the same mempolicy nodes as current if it is bound by such a policy
 * and whether or not it has the same set of allowed cpuset nodes.
 */
static bool has_intersects_mems_allowed(struct task_struct *start,
					const nodemask_t *mask)
{
	struct task_struct *tsk;
	bool ret = false;

	rcu_read_lock();
	for_each_thread(start, tsk) {
		if (mask) {
			/*
			 * If this is a mempolicy constrained oom, tsk's
			 * cpuset is irrelevant.  Only return true if its
			 * mempolicy intersects current, otherwise it may be
			 * needlessly killed.
			 */
			ret = mempolicy_nodemask_intersects(tsk, mask);
		} else {
			/*
			 * This is not a mempolicy constrained oom, so only
			 * check the mems of tsk's cpuset.
			 */
			ret = cpuset_mems_allowed_intersects(current, tsk);
		}
		if (ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
#else
static bool has_intersects_mems_allowed(struct task_struct *tsk,
					const nodemask_t *mask)
{
	return true;
}
#endif /* CONFIG_NUMA */
/*
 * The process p may have detached its own ->mm while exiting or through
 * use_mm(), but one or more of its subthreads may still have a valid
 * pointer.  Return p, or any of its subthreads with a valid ->mm, with
 * task_lock() held.
 */
struct task_struct *find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		task_lock(t);
		if (likely(t->mm))
			goto found;
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();
	return t;
}
/*
 * order == -1 means the oom kill is required by sysrq, otherwise only
 * for display purposes.
 */
static inline bool is_sysrq_oom(struct oom_control *oc)
{
	return oc->order == -1;
}

static inline bool is_memcg_oom(struct oom_control *oc)
{
	return oc->memcg != NULL;
}
/* return true if the task is not adequate as candidate victim task. */
static bool oom_unkillable_task(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	if (is_global_init(p))
		return true;
	if (p->flags & PF_KTHREAD)
		return true;

	/* When in mem_cgroup_out_of_memory() and p is not a member of the group */
	if (memcg && !task_in_mem_cgroup(p, memcg))
		return true;

	/* p may not have freeable memory in nodemask */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}
/*
 * Print out unreclaimable slab info when the amount of unreclaimable slab
 * memory is greater than all user memory (LRU pages).
 */
static bool is_dump_unreclaim_slabs(void)
{
	unsigned long nr_lru;

	nr_lru = global_node_page_state(NR_ACTIVE_ANON) +
		 global_node_page_state(NR_INACTIVE_ANON) +
		 global_node_page_state(NR_ACTIVE_FILE) +
		 global_node_page_state(NR_INACTIVE_FILE) +
		 global_node_page_state(NR_ISOLATED_ANON) +
		 global_node_page_state(NR_ISOLATED_FILE) +
		 global_node_page_state(NR_UNEVICTABLE);

	return (global_node_page_state(NR_SLAB_UNRECLAIMABLE) > nr_lru);
}
/**
 * oom_badness - heuristic function to determine which candidate task to kill
 * @p: task struct of the task whose badness we should calculate
 * @totalpages: total present RAM allowed for page allocation
 * @memcg: task's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * The heuristic for determining which task to kill is made to be as simple and
 * predictable as possible.  The goal is to return the highest value for the
 * task consuming the most memory to avoid subsequent oom failures.
 */
unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
			  const nodemask_t *nodemask, unsigned long totalpages)
{
	long points;
	long adj;

	if (oom_unkillable_task(p, memcg, nodemask))
		return 0;

	p = find_lock_task_mm(p);
	if (!p)
		return 0;

	/*
	 * Do not even consider tasks which are explicitly marked oom
	 * unkillable or have already been oom reaped or are in
	 * the middle of a vfork.
	 */
	adj = (long)p->signal->oom_score_adj;
	if (adj == OOM_SCORE_ADJ_MIN ||
			test_bit(MMF_OOM_SKIP, &p->mm->flags) ||
			in_vfork(p)) {
		task_unlock(p);
		return 0;
	}

	/*
	 * The baseline for the badness score is the proportion of RAM that each
	 * task's rss, pagetable and swap space use.
	 */
	points = get_mm_rss(p->mm) + get_mm_counter(p->mm, MM_SWAPENTS) +
		mm_pgtables_bytes(p->mm) / PAGE_SIZE;
	task_unlock(p);

	/* Normalize to oom_score_adj units */
	adj *= totalpages / 1000;
	points += adj;

	/*
	 * Never return 0 for an eligible task regardless of the root bonus and
	 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
	 */
	return points > 0 ? points : 1;
}
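/*
 * Worked example of the heuristic above (illustrative numbers): with
 * totalpages = 4194304 (16GB of RAM + swap with 4K pages), a task using
 * 1048576 pages of rss + swap + pagetables starts at points = 1048576.
 * An oom_score_adj of 500 then adds 500 * (4194304 / 1000) = 2097000,
 * i.e. each oom_score_adj unit is worth roughly a thousandth of total
 * memory, for a final score of 3145576.
 */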
static const char * const oom_constraint_text[] = {
	[CONSTRAINT_NONE] = "CONSTRAINT_NONE",
	[CONSTRAINT_CPUSET] = "CONSTRAINT_CPUSET",
	[CONSTRAINT_MEMORY_POLICY] = "CONSTRAINT_MEMORY_POLICY",
	[CONSTRAINT_MEMCG] = "CONSTRAINT_MEMCG",
};
/*
 * Determine the type of allocation constraint.
 */
static enum oom_constraint constrained_alloc(struct oom_control *oc)
{
	struct zone *zone;
	struct zoneref *z;
	enum zone_type high_zoneidx = gfp_zone(oc->gfp_mask);
	bool cpuset_limited = false;
	int nid;

	if (is_memcg_oom(oc)) {
		oc->totalpages = mem_cgroup_get_max(oc->memcg) ?: 1;
		return CONSTRAINT_MEMCG;
	}

	/* Default to all available memory */
	oc->totalpages = totalram_pages() + total_swap_pages;

	if (!IS_ENABLED(CONFIG_NUMA))
		return CONSTRAINT_NONE;

	if (!oc->zonelist)
		return CONSTRAINT_NONE;
	/*
	 * A __GFP_THISNODE allocation only reaches the oom killer when
	 * __GFP_NOFAIL is used, so we should avoid killing current and
	 * have to fall back to killing a random task in this case.
	 * CONSTRAINT_THISNODE would be the right answer, but there is no
	 * way to handle it for now.
	 */
	if (oc->gfp_mask & __GFP_THISNODE)
		return CONSTRAINT_NONE;

	/*
	 * This is not a __GFP_THISNODE allocation, so a truncated nodemask in
	 * the page allocator means a mempolicy is in effect.  Cpuset policy
	 * is enforced in get_page_from_freelist().
	 */
	if (oc->nodemask &&
	    !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, *oc->nodemask)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_MEMORY_POLICY;
	}

	/* Check whether this allocation failure is caused by cpuset's wall function */
	for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
			high_zoneidx, oc->nodemask)
		if (!cpuset_zone_allowed(zone, oc->gfp_mask))
			cpuset_limited = true;

	if (cpuset_limited) {
		oc->totalpages = total_swap_pages;
		for_each_node_mask(nid, cpuset_current_mems_allowed)
			oc->totalpages += node_spanned_pages(nid);
		return CONSTRAINT_CPUSET;
	}
	return CONSTRAINT_NONE;
}
static int oom_evaluate_task(struct task_struct *task, void *arg)
{
	struct oom_control *oc = arg;
	unsigned long points;

	if (oom_unkillable_task(task, NULL, oc->nodemask))
		goto next;

	/*
	 * This task already has access to memory reserves and is being killed.
	 * Don't allow any other task to have access to the reserves unless
	 * the task has MMF_OOM_SKIP because chances that it would release
	 * any memory are quite low.
	 */
	if (!is_sysrq_oom(oc) && tsk_is_oom_victim(task)) {
		if (test_bit(MMF_OOM_SKIP, &task->signal->oom_mm->flags))
			goto next;
		goto abort;
	}

	/*
	 * If task is allocating a lot of memory and has been marked to be
	 * killed first if it triggers an oom, then select it.
	 */
	if (oom_task_origin(task)) {
		points = ULONG_MAX;
		goto select;
	}

	points = oom_badness(task, NULL, oc->nodemask, oc->totalpages);
	if (!points || points < oc->chosen_points)
		goto next;
select:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	get_task_struct(task);
	oc->chosen = task;
	oc->chosen_points = points;
next:
	return 0;
abort:
	if (oc->chosen)
		put_task_struct(oc->chosen);
	oc->chosen = (void *)-1UL;
	return 1;
}
/*
 * Simple selection loop. We choose the process with the highest number of
 * 'points'. In case the scan was aborted, oc->chosen is set to -1.
 */
static void select_bad_process(struct oom_control *oc)
{
	if (is_memcg_oom(oc))
		mem_cgroup_scan_tasks(oc->memcg, oom_evaluate_task, oc);
	else {
		struct task_struct *p;

		rcu_read_lock();
		for_each_process(p)
			if (oom_evaluate_task(p, oc))
				break;
		rcu_read_unlock();
	}

	oc->chosen_points = oc->chosen_points * 1000 / oc->totalpages;
}
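/*
 * The normalization above scales the winner's badness into the same
 * 0..1000 range used by oom_score_adj; continuing the worked example
 * from oom_badness(), 3145576 * 1000 / 4194304 = 749 (integer math).
 * This mirrors how /proc/<pid>/oom_score is derived from oom_badness().
 */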
/**
 * dump_tasks - dump current memory state of all system tasks
 * @memcg: current's memory controller, if constrained
 * @nodemask: nodemask passed to page allocator for mempolicy ooms
 *
 * Dumps the current memory state of all eligible tasks.  Tasks not in the same
 * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
 * are not shown.
 * State information includes task's pid, uid, tgid, vm size, rss,
 * pgtables_bytes, swapents, oom_score_adj value, and name.
 */
static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("Tasks state (memory values in pages):\n");
	pr_info("[  pid  ]   uid  tgid total_vm      rss pgtables_bytes swapents oom_score_adj name\n");
	rcu_read_lock();
	for_each_process(p) {
		if (oom_unkillable_task(p, memcg, nodemask))
			continue;

		task = find_lock_task_mm(p);
		if (!task) {
			/*
			 * This is a kthread or all of p's threads have already
			 * detached their mm's.  There's no need to report
			 * them; they can't be oom killed anyway.
			 */
			continue;
		}

		pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu         %5hd %s\n",
			task->pid, from_kuid(&init_user_ns, task_uid(task)),
			task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
			mm_pgtables_bytes(task->mm),
			get_mm_counter(task->mm, MM_SWAPENTS),
			task->signal->oom_score_adj, task->comm);
		task_unlock(task);
	}
	rcu_read_unlock();
}
static void dump_oom_summary(struct oom_control *oc, struct task_struct *victim)
{
	/* one line summary of the oom killer context. */
	pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
			oom_constraint_text[oc->constraint],
			nodemask_pr_args(oc->nodemask));
	cpuset_print_current_mems_allowed();
	mem_cgroup_print_oom_context(oc->memcg, victim);
	pr_cont(",task=%s,pid=%d,uid=%d\n", victim->comm, victim->pid,
		from_kuid(&init_user_ns, task_uid(victim)));
}
static void dump_header(struct oom_control *oc, struct task_struct *p)
{
	pr_warn("%s invoked oom-killer: gfp_mask=%#x(%pGg), order=%d, oom_score_adj=%hd\n",
		current->comm, oc->gfp_mask, &oc->gfp_mask, oc->order,
			current->signal->oom_score_adj);
	if (!IS_ENABLED(CONFIG_COMPACTION) && oc->order)
		pr_warn("COMPACTION is disabled!!!\n");

	dump_stack();
	if (is_memcg_oom(oc))
		mem_cgroup_print_oom_meminfo(oc->memcg);
	else {
		show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
		if (is_dump_unreclaim_slabs())
			dump_unreclaimable_slab();
	}
	if (sysctl_oom_dump_tasks)
		dump_tasks(oc->memcg, oc->nodemask);
	if (p)
		dump_oom_summary(oc, p);
}
/*
 * Number of OOM victims in flight
 */
static atomic_t oom_victims = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(oom_victims_wait);

static bool oom_killer_disabled __read_mostly;

#define K(x) ((x) << (PAGE_SHIFT-10))
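/* K() converts pages to KiB: with 4K pages (PAGE_SHIFT == 12), K(x) == x * 4 */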
/*
 * task->mm can be NULL if the task is the exited group leader.  So to
 * determine whether the task is using a particular mm, we examine all the
 * task's threads: if one of those is using this mm then this task was also
 * using it.
 */
bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		struct mm_struct *t_mm = READ_ONCE(t->mm);
		if (t_mm)
			return t_mm == mm;
	}
	return false;
}
#ifdef CONFIG_MMU
/*
 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
 * victim (if that is possible) to help the OOM killer to move on.
 */
static struct task_struct *oom_reaper_th;
static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
static struct task_struct *oom_reaper_list;
static DEFINE_SPINLOCK(oom_reaper_lock);
bool __oom_reap_task_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	/*
	 * Tell all users of get_user/copy_from_user etc... that the content
	 * is no longer stable. No barriers really needed because unmapping
	 * should imply barriers already and the reader would hit a page fault
	 * if it stumbled over reaped memory.
	 */
	set_bit(MMF_UNSTABLE, &mm->flags);

	for (vma = mm->mmap ; vma; vma = vma->vm_next) {
		if (!can_madv_dontneed_vma(vma))
			continue;

		/*
		 * Only anonymous pages have a good chance to be dropped
		 * without additional steps which we cannot afford as we
		 * are OOM already.
		 *
		 * We do not even care about fs backed pages because all
		 * which are reclaimable have already been reclaimed and
		 * we do not want to block exit_mmap by keeping mm ref
		 * count elevated without a good reason.
		 */
		if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
			struct mmu_notifier_range range;
			struct mmu_gather tlb;

			mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
						vma, mm, vma->vm_start,
						vma->vm_end);
			tlb_gather_mmu(&tlb, mm, range.start, range.end);
			if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
				tlb_finish_mmu(&tlb, range.start, range.end);
				ret = false;
				continue;
			}
			unmap_page_range(&tlb, vma, range.start, range.end, NULL);
			mmu_notifier_invalidate_range_end(&range);
			tlb_finish_mmu(&tlb, range.start, range.end);
		}
	}

	return ret;
}
/*
 * Reaps the address space of the given task.
 *
 * Returns true on success and false if none or part of the address space
 * has been reclaimed and the caller should retry later.
 */
static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	bool ret = true;

	if (!down_read_trylock(&mm->mmap_sem)) {
		trace_skip_task_reaping(tsk->pid);
		return false;
	}

	/*
	 * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't
	 * work on the mm anymore. The check for MMF_OOM_SKIP must run
	 * under mmap_sem for reading because it serializes against the
	 * down_write();up_write() cycle in exit_mmap().
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
		trace_skip_task_reaping(tsk->pid);
		goto out_unlock;
	}

	trace_start_task_reaping(tsk->pid);

	/* failed to reap part of the address space. Try again later */
	ret = __oom_reap_task_mm(mm);
	if (!ret)
		goto out_finish;

	pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
			task_pid_nr(tsk), tsk->comm,
			K(get_mm_counter(mm, MM_ANONPAGES)),
			K(get_mm_counter(mm, MM_FILEPAGES)),
			K(get_mm_counter(mm, MM_SHMEMPAGES)));
out_finish:
	trace_finish_task_reaping(tsk->pid);
out_unlock:
	up_read(&mm->mmap_sem);

	return ret;
}
#define MAX_OOM_REAP_RETRIES 10
static void oom_reap_task(struct task_struct *tsk)
{
	int attempts = 0;
	struct mm_struct *mm = tsk->signal->oom_mm;

	/* Retry the down_read_trylock(mmap_sem) a few times */
	while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
		schedule_timeout_idle(HZ/10);

	if (attempts <= MAX_OOM_REAP_RETRIES ||
	    test_bit(MMF_OOM_SKIP, &mm->flags))
		goto done;

	pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
		task_pid_nr(tsk), tsk->comm);
	debug_show_all_locks();

done:
	tsk->oom_reaper_list = NULL;

	/*
	 * Hide this mm from the OOM killer because it has either been reaped
	 * or somebody can't call up_write(mmap_sem).
	 */
	set_bit(MMF_OOM_SKIP, &mm->flags);

	/* Drop a reference taken by wake_oom_reaper */
	put_task_struct(tsk);
}
static int oom_reaper(void *unused)
{
	while (true) {
		struct task_struct *tsk = NULL;

		wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
		spin_lock(&oom_reaper_lock);
		if (oom_reaper_list != NULL) {
			tsk = oom_reaper_list;
			oom_reaper_list = tsk->oom_reaper_list;
		}
		spin_unlock(&oom_reaper_lock);

		if (tsk)
			oom_reap_task(tsk);
	}

	return 0;
}
static void wake_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);

	spin_lock(&oom_reaper_lock);
	tsk->oom_reaper_list = oom_reaper_list;
	oom_reaper_list = tsk;
	spin_unlock(&oom_reaper_lock);
	trace_wake_reaper(tsk->pid);
	wake_up(&oom_reaper_wait);
}
static int __init oom_init(void)
{
	oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
	return 0;
}
subsys_initcall(oom_init)
#else
static inline void wake_oom_reaper(struct task_struct *tsk)
{
}
#endif /* CONFIG_MMU */
/**
 * mark_oom_victim - mark the given task as OOM victim
 * @tsk: task to mark
 *
 * Has to be called with oom_lock held and never after
 * oom has been disabled already.
 *
 * tsk->mm has to be non NULL and the caller has to guarantee it is stable
 * (either under task_lock or by operating on current).
 */
static void mark_oom_victim(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;

	WARN_ON(oom_killer_disabled);
	/* OOM killer might race with memcg OOM */
	if (test_and_set_tsk_thread_flag(tsk, TIF_MEMDIE))
		return;

	/* oom_mm is bound to the signal struct life time. */
	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
		mmgrab(tsk->signal->oom_mm);
		set_bit(MMF_OOM_VICTIM, &mm->flags);
	}

	/*
	 * Make sure that the task is woken up from uninterruptible sleep
	 * if it is frozen, because otherwise the OOM killer would not be
	 * able to free any memory and would livelock. freezing_slow_path
	 * will tell the freezer that TIF_MEMDIE tasks should be ignored.
	 */
	__thaw_task(tsk);
	atomic_inc(&oom_victims);
	trace_mark_victim(tsk->pid);
}
/**
 * exit_oom_victim - note the exit of an OOM victim
 */
void exit_oom_victim(void)
{
	clear_thread_flag(TIF_MEMDIE);

	if (!atomic_dec_return(&oom_victims))
		wake_up_all(&oom_victims_wait);
}
/**
 * oom_killer_enable - enable OOM killer
 */
void oom_killer_enable(void)
{
	oom_killer_disabled = false;
	pr_info("OOM killer enabled.\n");
}
/**
 * oom_killer_disable - disable OOM killer
 * @timeout: maximum timeout to wait for oom victims in jiffies
 *
 * Forces all page allocations to fail rather than trigger OOM killer.
 * Will block and wait until all OOM victims are killed or the given
 * timeout expires.
 *
 * The function cannot be called when there are runnable user tasks because
 * the userspace would see unexpected allocation failures as a result. Any
 * new use of this function should be discussed with the MM people.
 *
 * Returns true if successful and false if the OOM killer cannot be
 * disabled.
 */
bool oom_killer_disable(signed long timeout)
{
	signed long ret;

	/*
	 * Make sure to not race with an ongoing OOM killer. Check that the
	 * current is not killed (possibly due to sharing the victim's memory).
	 */
	if (mutex_lock_killable(&oom_lock))
		return false;
	oom_killer_disabled = true;
	mutex_unlock(&oom_lock);

	ret = wait_event_interruptible_timeout(oom_victims_wait,
			!atomic_read(&oom_victims), timeout);
	if (ret <= 0) {
		oom_killer_enable();
		return false;
	}
	pr_info("OOM killer disabled.\n");

	return true;
}
static inline bool __task_will_free_mem(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;

	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	if (sig->flags & SIGNAL_GROUP_COREDUMP)
		return false;

	if (sig->flags & SIGNAL_GROUP_EXIT)
		return true;

	if (thread_group_empty(task) && (task->flags & PF_EXITING))
		return true;

	return false;
}
/*
 * Checks whether the given task is dying or exiting and likely to
 * release its address space. This means that all threads and processes
 * sharing the same mm have to be killed or exiting.
 * Caller has to make sure that task->mm is stable (hold task_lock or
 * operate on current).
 */
static bool task_will_free_mem(struct task_struct *task)
{
	struct mm_struct *mm = task->mm;
	struct task_struct *p;
	bool ret = true;

	/*
	 * Skip tasks without an mm: the task might have passed exit_mm and
	 * exit_oom_victim already. The oom_reaper could have rescued that,
	 * but do not rely on it for now. We can consider find_lock_task_mm
	 * in the future.
	 */
	if (!mm)
		return false;

	if (!__task_will_free_mem(task))
		return false;

	/*
	 * This task has already been drained by the oom reaper so there is
	 * only a small chance it will free some more.
	 */
	if (test_bit(MMF_OOM_SKIP, &mm->flags))
		return false;

	if (atomic_read(&mm->mm_users) <= 1)
		return true;

	/*
	 * Make sure that all tasks which share the mm with the given task
	 * are dying as well to ensure that a) nobody pins its mm and
	 * b) the task is also reapable by the oom reaper.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(task, p))
			continue;
		ret = __task_will_free_mem(p);
		if (!ret)
			break;
	}
	rcu_read_unlock();

	return ret;
}
static void __oom_kill_process(struct task_struct *victim, const char *message)
{
	struct task_struct *p;
	struct mm_struct *mm;
	bool can_oom_reap = true;

	p = find_lock_task_mm(victim);
	if (!p) {
		put_task_struct(victim);
		return;
	} else if (victim != p) {
		get_task_struct(p);
		put_task_struct(victim);
		victim = p;
	}

	/* Get a reference to safely compare mm after task_unlock(victim) */
	mm = victim->mm;
	mmgrab(mm);

	/* Raise event before sending signal: task reaper must see this */
	count_vm_event(OOM_KILL);
	memcg_memory_event_mm(mm, MEMCG_OOM_KILL);

	/*
	 * We should send SIGKILL before granting access to memory reserves
	 * in order to prevent the OOM victim from depleting the memory
	 * reserves from the user space under its control.
	 */
	do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
	mark_oom_victim(victim);
	pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
		message, task_pid_nr(victim), victim->comm,
		K(victim->mm->total_vm),
		K(get_mm_counter(victim->mm, MM_ANONPAGES)),
		K(get_mm_counter(victim->mm, MM_FILEPAGES)),
		K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
	task_unlock(victim);

	/*
	 * Kill all user processes sharing victim->mm in other thread groups, if
	 * any.  They don't get access to memory reserves, though, to avoid
	 * depletion of all memory.  This prevents mm->mmap_sem livelock when an
	 * oom killed thread cannot exit because it requires the semaphore and
	 * it's contended by another thread trying to allocate memory itself.
	 * That thread will now get access to memory reserves since it has a
	 * pending fatal signal.
	 */
	rcu_read_lock();
	for_each_process(p) {
		if (!process_shares_mm(p, mm))
			continue;
		if (same_thread_group(p, victim))
			continue;
		if (is_global_init(p)) {
			can_oom_reap = false;
			set_bit(MMF_OOM_SKIP, &mm->flags);
			pr_info("oom killer %d (%s) has mm pinned by %d (%s)\n",
					task_pid_nr(victim), victim->comm,
					task_pid_nr(p), p->comm);
			continue;
		}
		/*
		 * No use_mm() user needs to read from userspace, so we are
		 * ok to reap it.
		 */
		if (unlikely(p->flags & PF_KTHREAD))
			continue;
		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
	}
	rcu_read_unlock();

	if (can_oom_reap)
		wake_oom_reaper(victim);

	mmdrop(mm);
	put_task_struct(victim);
}
/*
 * Kill provided task unless it's secured by setting
 * oom_score_adj to OOM_SCORE_ADJ_MIN.
 */
static int oom_kill_memcg_member(struct task_struct *task, void *message)
{
	if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
	    !is_global_init(task)) {
		get_task_struct(task);
		__oom_kill_process(task, message);
	}
	return 0;
}
static void oom_kill_process(struct oom_control *oc, const char *message)
{
	struct task_struct *victim = oc->chosen;
	struct mem_cgroup *oom_group;
	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

	/*
	 * If the task is already exiting, don't alarm the sysadmin or kill
	 * its children or threads, just give it access to memory reserves
	 * so it can die quickly.
	 */
	task_lock(victim);
	if (task_will_free_mem(victim)) {
		mark_oom_victim(victim);
		wake_oom_reaper(victim);
		task_unlock(victim);
		put_task_struct(victim);
		return;
	}
	task_unlock(victim);

	if (__ratelimit(&oom_rs))
		dump_header(oc, victim);

	/*
	 * Do we need to kill the entire memory cgroup?
	 * Or even one of the ancestor memory cgroups?
	 * Check this out before killing the victim task.
	 */
	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);

	__oom_kill_process(victim, message);

	/*
	 * If necessary, kill all tasks in the selected memory cgroup.
	 */
	if (oom_group) {
		mem_cgroup_print_oom_group(oom_group);
		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
				      (void *)message);
		mem_cgroup_put(oom_group);
	}
}
/*
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
static void check_panic_on_oom(struct oom_control *oc)
{
	if (likely(!sysctl_panic_on_oom))
		return;
	if (sysctl_panic_on_oom != 2) {
		/*
		 * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
		if (oc->constraint != CONSTRAINT_NONE)
			return;
	}
	/* Do not panic for oom kills triggered by sysrq */
	if (is_sysrq_oom(oc))
		return;
	dump_header(oc, NULL);
	panic("Out of memory: %s panic_on_oom is enabled\n",
		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
}
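/*
 * Summary of the sysctl handled above: panic_on_oom == 0 (the default)
 * never panics, == 1 panics only for a system-wide (CONSTRAINT_NONE)
 * oom, and == 2 panics for any oom except one triggered by sysrq, e.g.:
 *
 *	echo 2 > /proc/sys/vm/panic_on_oom
 */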
static BLOCKING_NOTIFIER_HEAD(oom_notify_list);

int register_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_oom_notifier);

int unregister_oom_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&oom_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_oom_notifier);
/**
 * out_of_memory - kill the "best" process when we run out of memory
 * @oc: pointer to struct oom_control
 *
 * If we run out of memory, we have the choice between either
 * killing a random task (bad), letting the system crash (worse)
 * or trying to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
bool out_of_memory(struct oom_control *oc)
{
	unsigned long freed = 0;

	if (oom_killer_disabled)
		return false;

	if (!is_memcg_oom(oc)) {
		blocking_notifier_call_chain(&oom_notify_list, 0, &freed);
		if (freed > 0)
			/* Got some memory back in the last second. */
			return true;
	}

	/*
	 * If current has a pending SIGKILL or is exiting, then automatically
	 * select it. The goal is to allow it to allocate so that it may
	 * quickly exit and free its memory.
	 */
	if (task_will_free_mem(current)) {
		mark_oom_victim(current);
		wake_oom_reaper(current);
		return true;
	}

	/*
	 * The OOM killer does not compensate for IO-less reclaim.
	 * pagefault_out_of_memory lost its gfp context so we have to
	 * make sure to exclude the 0 mask - all other users should have at
	 * least ___GFP_DIRECT_RECLAIM to get here.
	 */
	if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
		return true;

	/*
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
	oc->constraint = constrained_alloc(oc);
	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
		oc->nodemask = NULL;
	check_panic_on_oom(oc);

	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
	    current->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
		get_task_struct(current);
		oc->chosen = current;
		oom_kill_process(oc, "Out of memory (oom_kill_allocating_task)");
		return true;
	}

	select_bad_process(oc);
	/* Found nothing?!?! */
	if (!oc->chosen) {
		dump_header(oc, NULL);
		pr_warn("Out of memory and no killable processes...\n");
		/*
		 * If we got here due to an actual allocation at the
		 * system level, we cannot survive this and will enter
		 * an endless loop in the allocator. Bail out now.
		 */
		if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
			panic("System is deadlocked on memory\n");
	}
	if (oc->chosen && oc->chosen != (void *)-1UL)
		oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
				 "Memory cgroup out of memory");
	return !!oc->chosen;
}
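/*
 * Illustrative sketch of the main caller (the real one lives in
 * __alloc_pages_may_oom() in mm/page_alloc.c; the field values below
 * are an approximation for the example, not a verbatim copy): the page
 * allocator fills in an oom_control describing the failed allocation
 * and calls out_of_memory() under oom_lock:
 *
 *	struct oom_control oc = {
 *		.zonelist = ac->zonelist,
 *		.nodemask = ac->nodemask,
 *		.memcg = NULL,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 */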
/*
 * The pagefault handler calls here because it is out of memory, so kill a
 * memory-hogging task. If oom_lock is held by somebody else, a parallel oom
 * killing is already in progress so do nothing.
 */
void pagefault_out_of_memory(void)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = NULL,
		.gfp_mask = 0,
		.order = 0,
	};

	if (mem_cgroup_oom_synchronize(true))
		return;

	if (!mutex_trylock(&oom_lock))
		return;
	out_of_memory(&oc);
	mutex_unlock(&oom_lock);
}