/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

/* Watchdog configuration */
static DEFINE_MUTEX(watchdog_proc_mutex);

int __read_mostly nmi_watchdog_enabled;

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED |
                                               NMI_WATCHDOG_ENABLED;
#else
unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED;
#endif

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void hardlockup_detector_disable(void)
{
        watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
}

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled &= ~NMI_WATCHDOG_ENABLED;
        else if (!strncmp(str, "1", 1))
                watchdog_enabled |= NMI_WATCHDOG_ENABLED;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
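
/*
 * Example (editorial note, not from the original source): with the parser
 * above, booting with "nmi_watchdog=panic" makes a detected hard lockup
 * panic the machine, "nmi_watchdog=nopanic" only warns, and
 * "nmi_watchdog=0" / "nmi_watchdog=1" clear or set NMI_WATCHDOG_ENABLED
 * before the detector is started.
 */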

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
int __read_mostly soft_watchdog_enabled;
#endif

int __read_mostly watchdog_user_enabled;
int __read_mostly watchdog_thresh = 10;

int __read_mostly sysctl_softlockup_all_cpu_backtrace;
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

/*
 * The 'watchdog_running' variable is set to 1 when the watchdog threads
 * are registered/started and is set to 0 when the watchdog threads are
 * unregistered/stopped, so it is an indicator whether the threads exist.
 */
static int __read_mostly watchdog_running;

/*
 * If a subsystem has a need to deactivate the watchdog temporarily, it
 * can use the suspend/resume interface to achieve this. The content of
 * the 'watchdog_suspended' variable reflects this state. Existing threads
 * are parked/unparked by the lockup_detector_{suspend|resume} functions
 * (see comment blocks pertaining to those functions for further details).
 *
 * 'watchdog_suspended' also prevents threads from being registered/started
 * or unregistered/stopped via parameters in /proc/sys/kernel, so the state
 * of 'watchdog_running' cannot change while the watchdog is deactivated
 * temporarily (see related code in 'proc' handlers).
 */
int __read_mostly watchdog_suspended;

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
        return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/* Helper for online, unparked cpus. */
#define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)

atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);

static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);
        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_softlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);

#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
        sysctl_hardlockup_all_cpu_backtrace =
                !!simple_strtol(str, NULL, 0);
        return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
        return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}
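
/*
 * Editorial note: running_clock() returns nanoseconds, and the shift by 30
 * divides by 2^30 = 1073741824 ~= 10^9, so the result is off from true
 * seconds by about 7%. The watchdog only needs coarse second-level
 * resolution, so the error merely stretches the effective timeout slightly.
 */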

static void set_sample_period(void)
{
        /*
         * Convert watchdog_thresh from seconds to ns.
         * The divide by 5 is to give the hrtimer several chances (two
         * or three with the current relation between the soft and hard
         * thresholds) to increment before the hardlockup detector
         * generates a warning.
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
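
/*
 * Worked example (editorial): with the default watchdog_thresh of 10,
 * get_softlockup_thresh() yields 20 seconds and sample_period becomes
 * 20 * (NSEC_PER_SEC / 5) = 4 seconds, so the hrtimer fires five times
 * per soft lockup window.
 */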

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
void touch_softlockup_watchdog_sched(void)
{
        /*
         * Preemption can be enabled. It doesn't matter which CPU's timestamp
         * gets zeroed here, so use the raw_ operation.
         */
        raw_cpu_write(watchdog_touch_ts, 0);
}

void touch_softlockup_watchdog(void)
{
        touch_softlockup_watchdog_sched();
        wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
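
/*
 * Illustrative use (a sketch, not part of this file; device_ready() is a
 * hypothetical helper): code that must busy-wait for a long time can kick
 * the detector so the delay is not reported as a soft lockup:
 *
 *      while (!device_ready(dev)) {
 *              cpu_relax();
 *              touch_softlockup_watchdog();
 *      }
 */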

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * This is done lockless. Do we care if a 0 races with a timestamp?
         * All it means is that the softlockup check starts one cycle later.
         */
        for_each_watchdog_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
        wq_watchdog_touch(-1);
}

void touch_softlockup_watchdog_sync(void)
{
        __this_cpu_write(softlockup_touch_sync, true);
        __this_cpu_write(watchdog_touch_ts, 0);
}

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
                /* Warn about unreasonable delays. */
                if (time_after(now, touch_ts + get_softlockup_thresh()))
                        return now - touch_ts;
        }
        return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return true;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return false;
}
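
/*
 * Editorial note: the hard lockup check pairs the two counters above.
 * watchdog_timer_fn() increments hrtimer_interrupts from the hrtimer; the
 * NMI-driven caller of is_hardlockup() compares it against the value saved
 * on the previous sample. If the hrtimer has not fired between two
 * consecutive samples, interrupts have been blocked for the whole hard
 * lockup window and true is returned.
 */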

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_enable_all_cpus(void);
static void watchdog_disable_all_cpus(void);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

        if (atomic_read(&watchdog_park_in_progress) != 0)
                return HRTIMER_NORESTART;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /*
         * Check for a softlockup. This is done by making sure a high
         * priority task is being scheduled. The task touches the watchdog
         * to indicate it is getting cpu time. If it hasn't, then this is a
         * good indication some task is hogging the cpu.
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup. Check to see if the host
                 * stopped the vm before we issue the warning.
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true) {
                        /*
                         * When multiple processes are causing softlockups the
                         * softlockup detector only warns on the first one
                         * because the code relies on a full quiet cycle to
                         * re-arm. The second process prevents the quiet cycle
                         * and never gets reported. Use task pointers to detect
                         * this.
                         */
                        if (__this_cpu_read(softlockup_task_ptr_saved) !=
                            current) {
                                __this_cpu_write(soft_watchdog_warn, false);
                                __touch_watchdog();
                        }
                        return HRTIMER_RESTART;
                }

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Prevent multiple soft-lockup reports if one cpu is
                         * already engaged in dumping cpu back traces.
                         */
                        if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
                                /* Someone else will report us. Let's give up */
                                __this_cpu_write(soft_watchdog_warn, true);
                                return HRTIMER_RESTART;
                        }
                }

                pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                __this_cpu_write(softlockup_task_ptr_saved, current);
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_all_cpu_backtrace) {
                        /*
                         * Avoid generating two back traces for current
                         * given that one is already made above.
                         */
                        trigger_allbutself_cpu_backtrace();

                        clear_bit(0, &soft_lockup_nmi_warn);
                        /* Barrier to sync with other cpus */
                        smp_mb__after_atomic();
                }

                add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
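
/*
 * Editorial summary of the soft lockup pipeline above: the per-CPU hrtimer
 * fires every sample_period, wakes the high-priority watchdog/%u thread,
 * and re-arms itself. The thread refreshes watchdog_touch_ts; if that
 * timestamp is older than get_softlockup_thresh() seconds when the timer
 * next fires, the thread has been starved of CPU and the warning above is
 * emitted.
 */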

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
        watchdog_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();

        /*
         * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the
         * failure path. Check for failures that can occur asynchronously -
         * for example, when CPUs are on-lined - and shut down the hardware
         * perf event on each CPU accordingly.
         *
         * The only non-obvious place this bit can be cleared is through
         * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a
         * pr_info here would be too noisy as it would result in a message
         * every few seconds if the hardlockup was disabled but the softlockup
         * detector was left enabled.
         */
        if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
                watchdog_nmi_disable(cpu);
}

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .cleanup                = watchdog_cleanup,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};

/*
 * park all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function returns an error if kthread_park() of a watchdog thread
 * fails. In this situation, the watchdog threads of some CPUs can already
 * be parked and the watchdog threads of other CPUs can still be runnable.
 * Callers are expected to handle this special condition as appropriate in
 * their context.
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static int watchdog_park_threads(void)
{
        int cpu, ret = 0;

        atomic_set(&watchdog_park_in_progress, 1);

        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }

        atomic_set(&watchdog_park_in_progress, 0);

        return ret;
}

/*
 * unpark all watchdog threads that are specified in 'watchdog_cpumask'
 *
 * This function may only be called in a context that is protected against
 * races with CPU hotplug - for example, via get_online_cpus().
 */
static void watchdog_unpark_threads(void)
{
        int cpu;

        for_each_watchdog_cpu(cpu)
                kthread_unpark(per_cpu(softlockup_watchdog, cpu));
}

static int update_watchdog_all_cpus(void)
{
        int ret;

        ret = watchdog_park_threads();
        if (ret)
                return ret;

        watchdog_unpark_threads();

        return 0;
}

static int watchdog_enable_all_cpus(void)
{
        int err = 0;

        if (!watchdog_running) {
                err = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
                                                             &watchdog_cpumask);
                if (err)
                        pr_err("Failed to create watchdog threads, disabled\n");
                else
                        watchdog_running = 1;
        } else {
                /*
                 * Enable/disable the lockup detectors or
                 * change the sample period 'on the fly'.
                 */
                err = update_watchdog_all_cpus();

                if (err) {
                        watchdog_disable_all_cpus();
                        pr_err("Failed to update lockup detectors, disabled\n");
                }
        }

        if (err)
                watchdog_enabled = 0;

        return err;
}

static void watchdog_disable_all_cpus(void)
{
        if (watchdog_running) {
                watchdog_running = 0;
                smpboot_unregister_percpu_thread(&watchdog_threads);
        }
}

#else /* SOFTLOCKUP */
static int watchdog_park_threads(void)
{
        return 0;
}

static void watchdog_unpark_threads(void)
{
}

static int watchdog_enable_all_cpus(void)
{
        return 0;
}

static void watchdog_disable_all_cpus(void)
{
}

static void set_sample_period(void)
{
}
#endif /* SOFTLOCKUP */

/*
 * Suspend the hard and soft lockup detector by parking the watchdog threads.
 */
int lockup_detector_suspend(void)
{
        int ret = 0;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);
        /*
         * Multiple suspend requests can be active in parallel (counted by
         * the 'watchdog_suspended' variable). If the watchdog threads are
         * running, the first caller takes care that they will be parked.
         * The state of 'watchdog_running' cannot change while a suspend
         * request is active (see related code in 'proc' handlers).
         */
        if (watchdog_running && !watchdog_suspended)
                ret = watchdog_park_threads();

        if (ret == 0)
                watchdog_suspended++;
        else {
                watchdog_disable_all_cpus();
                pr_err("Failed to suspend lockup detectors, disabled\n");
                watchdog_enabled = 0;
        }

        mutex_unlock(&watchdog_proc_mutex);

        return ret;
}

/*
 * Resume the hard and soft lockup detector by unparking the watchdog threads.
 */
void lockup_detector_resume(void)
{
        mutex_lock(&watchdog_proc_mutex);

        watchdog_suspended--;
        /*
         * The watchdog threads are unparked if they were previously running
         * and if there is no more active suspend request.
         */
        if (watchdog_running && !watchdog_suspended)
                watchdog_unpark_threads();

        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
}
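
/*
 * Illustrative pairing (a sketch, not from this file; long_safe_operation()
 * is hypothetical): a subsystem that must keep the detector quiet around a
 * long, known-safe operation would bracket it like this:
 *
 *      if (lockup_detector_suspend() == 0) {
 *              long_safe_operation();
 *              lockup_detector_resume();
 *      }
 */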

#ifdef CONFIG_SYSCTL

/*
 * Update the run state of the lockup detectors.
 */
static int proc_watchdog_update(void)
{
        int err = 0;

        /*
         * Watchdog threads won't be started if they are already active.
         * The 'watchdog_running' variable in watchdog_*_all_cpus() takes
         * care of this. If those threads are already active, the sample
         * period will be updated and the lockup detectors will be enabled
         * or disabled 'on the fly'.
         */
        if (watchdog_enabled && watchdog_thresh)
                err = watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return err;
}

/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to | 'which' contains the flag(s)
 * -------------------|-----------------------|-----------------------------
 * proc_watchdog      | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed
 *                    |                       | with SOFT_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_nmi_watchdog  | nmi_watchdog_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|-----------------------|-----------------------------
 * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;
        int *watchdog_param = (int *)table->data;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        /*
         * If the parameter is being read return the state of the corresponding
         * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the
         * run state of the lockup detectors.
         */
        if (!write) {
                *watchdog_param = (watchdog_enabled & which) != 0;
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        } else {
                err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
                if (err)
                        goto out;

                /*
                 * There is a race window between fetching the current value
                 * from 'watchdog_enabled' and storing the new value. During
                 * this race window, watchdog_nmi_enable() can sneak in and
                 * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'.
                 * The 'cmpxchg' detects this race and the loop retries.
                 */
                do {
                        old = watchdog_enabled;
                        /*
                         * If the parameter value is not zero set the
                         * corresponding bit(s), else clear it(them).
                         */
                        if (*watchdog_param)
                                new = old | which;
                        else
                                new = old & ~which;
                } while (cmpxchg(&watchdog_enabled, old, new) != old);

                /*
                 * Update the run state of the lockup detectors. There is _no_
                 * need to check the value returned by proc_watchdog_update()
                 * and to restore the previous value of 'watchdog_enabled' as
                 * both lockup detectors are disabled if proc_watchdog_update()
                 * returns an error.
                 */
                if (old == new)
                        goto out;

                err = proc_watchdog_update();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
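
/*
 * Editorial note on the cmpxchg loop above: 'watchdog_enabled' is updated
 * without a lock against watchdog_nmi_enable(), so the handler recomputes
 * 'new' from a fresh 'old' and retries until cmpxchg() confirms nobody
 * modified the word in between. Condensed, the update step is the standard
 * lock-free read-modify-write pattern:
 *
 *      do {
 *              old = watchdog_enabled;
 *              new = *watchdog_param ? old | which : old & ~which;
 *      } while (cmpxchg(&watchdog_enabled, old, new) != old);
 */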

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
                      void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
                       void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
                                    table, write, buffer, lenp, ppos);
}
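
/*
 * Example (editorial): the three handlers above back
 * /proc/sys/kernel/{watchdog,nmi_watchdog,soft_watchdog}. From a shell,
 * "echo 0 > /proc/sys/kernel/nmi_watchdog" clears only
 * NMI_WATCHDOG_ENABLED, while "echo 0 > /proc/sys/kernel/watchdog" clears
 * both flags and stops the watchdog threads.
 */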

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
                         void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err, old, new;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        old = ACCESS_ONCE(watchdog_thresh);
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

        if (err || !write)
                goto out;

        /*
         * Update the sample period. Restore on failure.
         */
        new = ACCESS_ONCE(watchdog_thresh);
        if (old == new)
                goto out;

        set_sample_period();
        err = proc_watchdog_update();
        if (err) {
                watchdog_thresh = old;
                set_sample_period();
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
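
/*
 * Example (editorial): "echo 30 > /proc/sys/kernel/watchdog_thresh" raises
 * the hard lockup window to 30 seconds and the soft lockup window to 60;
 * set_sample_period() then stretches the hrtimer period to 12 seconds, and
 * the handler above restores the old value if the running threads cannot
 * be updated.
 */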

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
                          void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int err;

        get_online_cpus();
        mutex_lock(&watchdog_proc_mutex);

        if (watchdog_suspended) {
                /* no parameter changes allowed while watchdog is suspended */
                err = -EAGAIN;
                goto out;
        }

        err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
        if (!err && write) {
                /* Remove impossible cpus to keep sysctl output cleaner. */
                cpumask_and(&watchdog_cpumask, &watchdog_cpumask,
                            cpu_possible_mask);

                if (watchdog_running) {
                        /*
                         * Failure would be due to being unable to allocate
                         * a temporary cpumask, so we are likely not in a
                         * position to do much else to make things better.
                         */
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
                        if (smpboot_update_cpumask_percpu_thread(
                                    &watchdog_threads, &watchdog_cpumask) != 0)
                                pr_err("cpumask update failed\n");
#endif
                }
        }
out:
        mutex_unlock(&watchdog_proc_mutex);
        put_online_cpus();
        return err;
}
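
/*
 * Example (editorial): "echo 0-3 > /proc/sys/kernel/watchdog_cpumask"
 * restricts the watchdog to CPUs 0-3. Because this is a mask of possible
 * CPUs, it may name CPUs that are still offline; they are covered
 * automatically once they come online.
 */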

#endif /* CONFIG_SYSCTL */

void __init lockup_detector_init(void)
{
        set_sample_period();

#ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_enabled()) {
                pr_info("Disabling watchdog on nohz_full cores by default\n");
                cpumask_copy(&watchdog_cpumask, housekeeping_mask);
        } else
                cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
        cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
}