/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>
static DEFINE_MUTEX(watchdog_proc_mutex);
#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif
int __read_mostly nmi_watchdog_enabled;
int __read_mostly soft_watchdog_enabled;
int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
	for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
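/*
 * Usage sketch (illustrative, not part of the original file): iterating
 * with the helper visits only CPUs that are both online and present in
 * 'watchdog_cpumask':
 *
 *	int cpu;
 *
 *	for_each_watchdog_cpu(cpu)
 *		pr_debug("watchdog may run on CPU %d\n", cpu);
 */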
atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;
/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
static int __read_mostly watchdog_suspended;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);
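/*
 * Example (illustrative): booting with "softlockup_panic=1" makes the
 * kernel panic when a soft lockup is detected, e.g. so that kdump can
 * capture a crash dump of the hung system.
 */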
static int __init nowatchdog_setup(char *str)
{
	watchdog_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);
static int __init nosoftlockup_setup(char *str)
{
	watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace =
		!!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
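/*
 * Worked example (illustrative): running_clock() returns nanoseconds and
 * the shift divides by 2^30 instead of 10^9, under-reporting by about 7%.
 * After 60 s of clock time this returns 60e9 >> 30 = 55, which is accurate
 * enough for comparisons against multi-second lockup thresholds.
 */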
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
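/*
 * Worked example (illustrative): with the default watchdog_thresh of 10,
 * get_softlockup_thresh() returns 20, so
 *
 *	sample_period = 20 * (NSEC_PER_SEC / 5) = 4e9 ns = 4 s,
 *
 * i.e. the hrtimer fires five times per 20 s soft lockup window and two
 * to three times per 10 s hard lockup window.
 */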
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, 0);
}
void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
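/*
 * Usage sketch (illustrative; hw_done() is a hypothetical helper): code
 * that legitimately keeps a CPU busy for a long stretch can pet the
 * detector so the stall is not reported as a soft lockup:
 *
 *	while (!hw_done())
 *		touch_softlockup_watchdog();
 */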
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * This is done lockless. Do we care if a 0 races with a timestamp?
	 * All it means is that the softlockup check starts one cycle later.
	 */
	for_each_watchdog_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
	wq_watchdog_touch(-1);
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, 0);
}
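/*
 * Note on the hard lockup check below: is_hardlockup() is driven from NMI
 * context (e.g. the perf event overflow handler). NMIs keep firing even
 * with interrupts disabled, so if the hrtimer interrupt count has not
 * advanced since the previous NMI-time check, timer interrupts have been
 * blocked for at least one full sample period: a hard lockup.
 */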
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
/*
 * These two functions are mostly architecture specific, so they are
 * defined as weak no-ops here and may be overridden by the architecture.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}
static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (atomic_read(&watchdog_park_in_progress) != 0)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == 0) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/*
	 * check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu.
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning.
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}
		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is already
			 * engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	/* kick off the timer for the hardlockup detector */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	/* Enable the perf event */
	watchdog_nmi_enable(cpu);

	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* initialize timestamp */
	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
	__touch_watchdog();
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	hrtimer_cancel(hrtimer);
	/* disable the perf event */
	watchdog_nmi_disable(cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
	/*
	 * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
	 * failure path. Check for failures that can occur asynchronously -
	 * for example, when CPUs are on-lined - and shut down the hardware
	 * perf event on each CPU accordingly.
	 *
	 * The only non-obvious place this bit can be cleared is through
	 * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
	 * pr_info here would be too noisy as it would result in a message
	 * every few seconds if the hardlockup was disabled but the softlockup
	 * enabled.
	 */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		watchdog_nmi_disable(cpu);
}
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
	int cpu, ret = 0;

	atomic_set(&watchdog_park_in_progress, 1);

	for_each_watchdog_cpu(cpu) {
		ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
		if (ret)
			break;
	}

	atomic_set(&watchdog_park_in_progress, 0);

	return ret;
}
/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
	int cpu;

	for_each_watchdog_cpu(cpu)
		kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}
/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
	int ret = 0;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);
	/*
	 * Multiple suspend requests can be active in parallel (counted by
	 * the 'watchdog_suspended' variable). If the watchdog threads are
	 * running, the first caller takes care that they will be parked.
	 * The state of 'watchdog_running' cannot change while a suspend
	 * request is active (see related code in 'proc' handlers).
	 */
	if (watchdog_running && !watchdog_suspended)
		ret = watchdog_park_threads();

	if (ret == 0)
		watchdog_suspended++;
	else {
		watchdog_disable_all_cpus();
		pr_err("Failed to suspend lockup detectors, disabled\n");
		watchdog_enabled = 0;
	}

	mutex_unlock(&watchdog_proc_mutex);

	return ret;
}
/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
	mutex_lock(&watchdog_proc_mutex);

	watchdog_suspended--;
	/*
	 * The watchdog threads are unparked if they were previously running
	 * and if there is no more active suspend request.
	 */
	if (watchdog_running && !watchdog_suspended)
		watchdog_unpark_threads();

	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
}
static int update_watchdog_all_cpus(void)
{
	int ret;

	ret = watchdog_park_threads();
	if (ret)
		return ret;

	watchdog_unpark_threads();

	return 0;
}
static int watchdog_enable_all_cpus(void)
{
	int err = 0;

	if (!watchdog_running) {
		err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
							     &watchdog_cpumask);
		if (err)
			pr_err("Failed to create watchdog threads, disabled\n");
		else
			watchdog_running = 1;
	} else {
		/*
		 * Enable/disable the lockup detectors or
		 * change the sample period 'on the fly'.
		 */
		err = update_watchdog_all_cpus();

		if (err) {
			watchdog_disable_all_cpus();
			pr_err("Failed to update lockup detectors, disabled\n");
		}
	}

	if (err)
		watchdog_enabled = 0;

	return err;
}
static void watchdog_disable_all_cpus(void)
{
	if (watchdog_running) {
		watchdog_running = 0;
		smpboot_unregister_percpu_thread(&watchdog_threads);
	}
}
#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
	int err = 0;

	/*
	 * Watchdog threads won't be started if they are already active.
	 * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
	 * care of this. If those threads are already active, the sample
	 * period will be updated and the lockup detectors will be enabled
	 * or disabled 'on the fly'.
	 */
	if (watchdog_enabled && watchdog_thresh)
		err = watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();

	return err;
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;
	int *watchdog_param = (int *)table->data;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}
	/*
	 * If the parameter is being read return the state of the corresponding
	 * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
	 * run state of the lockup detectors.
	 */
	if (!write) {
		*watchdog_param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (err)
			goto out;
		/*
		 * There is a race window between fetching the current value
		 * from 'watchdog_enabled' and storing the new value. During
		 * this race window, watchdog_nmi_enable() can sneak in and
		 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
		 * The 'cmpxchg' detects this race and the loop retries.
		 */
		do {
			old = watchdog_enabled;
			/*
			 * If the parameter value is not zero set the
			 * corresponding bit(s), else clear it(them).
			 */
			if (*watchdog_param)
				new = old | which;
			else
				new = old & ~which;
		} while (cmpxchg(&watchdog_enabled, old, new) != old);
		/*
		 * Update the run state of the lockup detectors. There is _no_
		 * need to check the value returned by proc_watchdog_update()
		 * and to restore the previous value of 'watchdog_enabled' as
		 * both lockup detectors are disabled if proc_watchdog_update()
		 * returns an error.
		 */
		if (old == new)
			goto out;

		err = proc_watchdog_update();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
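/*
 * Example (illustrative): the three knobs above map onto bits of
 * 'watchdog_enabled', so
 *
 *	echo 0 > /proc/sys/kernel/soft_watchdog
 *
 * clears SOFT_WATCHDOG_ENABLED while leaving the hard lockup detector
 * alone, and "echo 1 > /proc/sys/kernel/watchdog" sets both bits again.
 */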
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, new;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	old = ACCESS_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (err || !write)
		goto out;

	/*
	 * Update the sample period. Restore on failure.
	 */
	new = ACCESS_ONCE(watchdog_thresh);
	if (old == new)
		goto out;

	set_sample_period();
	err = proc_watchdog_update();
	if (err) {
		watchdog_thresh = old;
		set_sample_period();
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	get_online_cpus();
	mutex_lock(&watchdog_proc_mutex);

	if (watchdog_suspended) {
		/* no parameter changes allowed while watchdog is suspended */
		err = -EAGAIN;
		goto out;
	}

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write) {
		/* Remove impossible cpus to keep sysctl output cleaner. */
		cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
			    cpu_possible_mask);

		if (watchdog_running) {
			/*
			 * Failure would be due to being unable to allocate
			 * a temporary cpumask, so we are likely not in a
			 * position to do much else to make things better.
			 */
			if (smpboot_update_cpumask_percpu_thread(
				    &watchdog_threads, &watchdog_cpumask) != 0)
				pr_err("cpumask update failed\n");
		}
	}
out:
	mutex_unlock(&watchdog_proc_mutex);
	put_online_cpus();
	return err;
}
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
	set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
}