4 #include <uapi/linux/sched.h>
6 #include <linux/sched/prio.h>
8 #include <linux/capability.h>
9 #include <linux/mutex.h>
10 #include <linux/plist.h>
11 #include <linux/mm_types.h>
12 #include <asm/ptrace.h>
14 #include <linux/sem.h>
15 #include <linux/shm.h>
16 #include <linux/signal.h>
17 #include <linux/signal_types.h>
18 #include <linux/pid.h>
19 #include <linux/seccomp.h>
20 #include <linux/rculist.h>
21 #include <linux/rtmutex.h>
23 #include <linux/resource.h>
24 #include <linux/hrtimer.h>
25 #include <linux/kcov.h>
26 #include <linux/task_io_accounting.h>
27 #include <linux/latencytop.h>
28 #include <linux/cred.h>
29 #include <linux/gfp.h>
30 #include <linux/topology.h>
31 #include <linux/magic.h>
32 #include <linux/cgroup-defs.h>
37 struct futex_pi_state;
38 struct robust_list_head;
41 struct perf_event_context;
47 struct sighand_struct;
49 extern unsigned long total_forks;
50 extern int nr_threads;
51 DECLARE_PER_CPU(unsigned long, process_counts);
52 extern int nr_processes(void);
53 extern unsigned long nr_running(void);
54 extern bool single_task_running(void);
55 extern unsigned long nr_iowait(void);
56 extern unsigned long nr_iowait_cpu(int cpu);
57 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
59 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
60 extern void cpu_load_update_nohz_start(void);
61 extern void cpu_load_update_nohz_stop(void);
63 static inline void cpu_load_update_nohz_start(void) { }
64 static inline void cpu_load_update_nohz_stop(void) { }
67 extern void dump_cpu_task(int cpu);
72 #ifdef CONFIG_SCHED_DEBUG
73 extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
74 extern void proc_sched_set_task(struct task_struct *p);
78 * Task state bitmask. NOTE! These bits are also
79 * encoded in fs/proc/array.c: get_task_state().
81 * We have two separate sets of flags: task->state
82 * is about runnability, while task->exit_state is
83 * about the task exiting. Confusing, but this way
84 * modifying one set can't modify the other one by accident.
87 #define TASK_RUNNING 0
88 #define TASK_INTERRUPTIBLE 1
89 #define TASK_UNINTERRUPTIBLE 2
90 #define __TASK_STOPPED 4
91 #define __TASK_TRACED 8
92 /* in tsk->exit_state */
94 #define EXIT_ZOMBIE 32
95 #define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
96 /* in tsk->state again */
98 #define TASK_WAKEKILL 128
99 #define TASK_WAKING 256
100 #define TASK_PARKED 512
101 #define TASK_NOLOAD 1024
102 #define TASK_NEW 2048
103 #define TASK_STATE_MAX 4096
105 #define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
107 /* Convenience macros for the sake of set_current_state */
108 #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
109 #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
110 #define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
112 #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
114 /* Convenience macros for the sake of wake_up */
115 #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
116 #define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
118 /* get_task_state() */
119 #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
120 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
121 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
123 #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
124 #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
125 #define task_is_stopped_or_traced(task) \
126 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
127 #define task_contributes_to_load(task) \
128 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
129 (task->flags & PF_FROZEN) == 0 && \
130 (task->state & TASK_NOLOAD) == 0)
132 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
134 #define __set_current_state(state_value) \
136 current->task_state_change = _THIS_IP_; \
137 current->state = (state_value); \
139 #define set_current_state(state_value) \
141 current->task_state_change = _THIS_IP_; \
142 smp_store_mb(current->state, (state_value)); \
147 * set_current_state() includes a barrier so that the write of current->state
148 * is correctly serialised wrt the caller's subsequent test of whether to actually sleep:
152 * set_current_state(TASK_UNINTERRUPTIBLE);
158 * __set_current_state(TASK_RUNNING);
160 * If the caller does not need such serialisation (because, for instance, the
161 * condition test and condition change and wakeup are under the same lock) then
162 * use __set_current_state().
164 * The above is typically ordered against the wakeup, which does:
166 * need_sleep = false;
167 * wake_up_state(p, TASK_UNINTERRUPTIBLE);
169 * Where wake_up_state() (and all other wakeup primitives) imply enough
170 * barriers to order the store of the variable against wakeup.
172 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
173 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
174 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
176 * This is obviously fine, since they both store the exact same value.
178 * Also see the comments of try_to_wake_up().
180 #define __set_current_state(state_value) \
181 do { current->state = (state_value); } while (0)
182 #define set_current_state(state_value) \
183 smp_store_mb(current->state, (state_value))
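/*
 * Illustrative sketch (not part of the original header): the sleep side of
 * the pattern described in the comment above, polling a hypothetical
 * 'need_sleep' flag.
 */
static inline void example_wait_for_event(bool *need_sleep)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!*need_sleep)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

/*
 * The matching wake side, as in the comment above: wake_up_state() implies
 * enough barriers to order the 'need_sleep' store against the wakeup.
 */
static inline void example_wake(struct task_struct *p, bool *need_sleep)
{
	*need_sleep = false;
	wake_up_state(p, TASK_UNINTERRUPTIBLE);
}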
187 /* Task command name length */
188 #define TASK_COMM_LEN 16
190 #include <linux/spinlock.h>
193 * This serializes "schedule()" and also protects
194 * the run-queue from deletions/modifications (but
195 * _adding_ to the beginning of the run-queue has a separate lock).
198 extern rwlock_t tasklist_lock;
199 extern spinlock_t mmlist_lock;
203 #ifdef CONFIG_PROVE_RCU
204 extern int lockdep_tasklist_lock_is_held(void);
205 #endif /* #ifdef CONFIG_PROVE_RCU */
207 extern void sched_init(void);
208 extern void sched_init_smp(void);
209 extern asmlinkage void schedule_tail(struct task_struct *prev);
210 extern void init_idle(struct task_struct *idle, int cpu);
211 extern void init_idle_bootup_task(struct task_struct *idle);
213 extern cpumask_var_t cpu_isolated_map;
215 extern int runqueue_is_locked(int cpu);
217 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
218 extern void nohz_balance_enter_idle(int cpu);
219 extern void set_cpu_sd_state_idle(void);
220 extern int get_nohz_timer_target(void);
222 static inline void nohz_balance_enter_idle(int cpu) { }
223 static inline void set_cpu_sd_state_idle(void) { }
227 * Only dump TASK_* tasks. (0 for all tasks)
229 extern void show_state_filter(unsigned long state_filter);
231 static inline void show_state(void)
233 show_state_filter(0);
236 extern void show_regs(struct pt_regs *);
239 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
240 * task), SP is the stack pointer of the first frame that should be shown in the back
241 * trace (or NULL if the entire call-chain of the task should be shown).
243 extern void show_stack(struct task_struct *task, unsigned long *sp);
245 extern void cpu_init (void);
246 extern void trap_init(void);
247 extern void update_process_times(int user);
248 extern void scheduler_tick(void);
249 extern int sched_cpu_starting(unsigned int cpu);
250 extern int sched_cpu_activate(unsigned int cpu);
251 extern int sched_cpu_deactivate(unsigned int cpu);
253 #ifdef CONFIG_HOTPLUG_CPU
254 extern int sched_cpu_dying(unsigned int cpu);
256 # define sched_cpu_dying NULL
259 extern void sched_show_task(struct task_struct *p);
261 /* Attach to any functions which should be ignored in wchan output. */
262 #define __sched __attribute__((__section__(".sched.text")))
264 /* Linker adds these: start and end of __sched functions */
265 extern char __sched_text_start[], __sched_text_end[];
267 /* Is this address in the __sched functions? */
268 extern int in_sched_functions(unsigned long addr);
270 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
271 extern signed long schedule_timeout(signed long timeout);
272 extern signed long schedule_timeout_interruptible(signed long timeout);
273 extern signed long schedule_timeout_killable(signed long timeout);
274 extern signed long schedule_timeout_uninterruptible(signed long timeout);
275 extern signed long schedule_timeout_idle(signed long timeout);
276 asmlinkage void schedule(void);
277 extern void schedule_preempt_disabled(void);
279 extern int __must_check io_schedule_prepare(void);
280 extern void io_schedule_finish(int token);
281 extern long io_schedule_timeout(long timeout);
282 extern void io_schedule(void);
284 void __noreturn do_task_dead(void);
289 * struct prev_cputime - snapshot of system and user cputime
290 * @utime: time spent in user mode
291 * @stime: time spent in system mode
292 * @lock: protects the above two fields
294 * Stores previous user/system time values such that we can guarantee monotonicity.
297 struct prev_cputime {
298 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
305 static inline void prev_cputime_init(struct prev_cputime *prev)
307 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
308 prev->utime = prev->stime = 0;
309 raw_spin_lock_init(&prev->lock);
314 * struct task_cputime - collected CPU time counts
315 * @utime: time spent in user mode, in nanoseconds
316 * @stime: time spent in kernel mode, in nanoseconds
317 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
319 * This structure groups together three kinds of CPU time that are tracked for
320 * threads and thread groups. Most things considering CPU time want to group
321 * these counts together and treat all three of them in parallel.
323 struct task_cputime {
326 unsigned long long sum_exec_runtime;
329 /* Alternate field names when used to cache expirations. */
330 #define virt_exp utime
331 #define prof_exp stime
332 #define sched_exp sum_exec_runtime
335 * This is the atomic variant of task_cputime, which can be used for
336 * storing and updating task_cputime statistics without locking.
338 struct task_cputime_atomic {
341 atomic64_t sum_exec_runtime;
344 #define INIT_CPUTIME_ATOMIC \
345 (struct task_cputime_atomic) { \
346 .utime = ATOMIC64_INIT(0), \
347 .stime = ATOMIC64_INIT(0), \
348 .sum_exec_runtime = ATOMIC64_INIT(0), \
351 #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
354 * Disable preemption until the scheduler is running -- use an unconditional
355 * value so that it also works on !PREEMPT_COUNT kernels.
357 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
359 #define INIT_PREEMPT_COUNT PREEMPT_OFFSET
362 * Initial preempt_count value; reflects the preempt_count schedule invariant
363 * which states that during context switches:
365 * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
367 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
368 * Note: See finish_task_switch().
370 #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
373 * struct thread_group_cputimer - thread group interval timer counts
374 * @cputime_atomic: atomic thread group interval timers.
375 * @running: true when there are timers running and
376 * @cputime_atomic receives updates.
377 * @checking_timer: true when a thread in the group is in the
378 * process of checking for thread group timers.
380 * This structure contains the version of task_cputime, above, that is
381 * used for thread group CPU timer calculations.
383 struct thread_group_cputimer {
384 struct task_cputime_atomic cputime_atomic;
389 #include <linux/rwsem.h>
392 struct backing_dev_info;
393 struct reclaim_state;
395 #ifdef CONFIG_SCHED_INFO
397 /* cumulative counters */
398 unsigned long pcount; /* # of times run on this cpu */
399 unsigned long long run_delay; /* time spent waiting on a runqueue */
402 unsigned long long last_arrival,/* when we last ran on a cpu */
403 last_queued; /* when we were last queued to run */
405 #endif /* CONFIG_SCHED_INFO */
407 struct task_delay_info;
409 static inline int sched_info_on(void)
411 #ifdef CONFIG_SCHEDSTATS
413 #elif defined(CONFIG_TASK_DELAY_ACCT)
414 extern int delayacct_on;
421 #ifdef CONFIG_SCHEDSTATS
422 void force_schedstat_enabled(void);
426 * Integer metrics need fixed point arithmetic, e.g., sched/fair
427 * has a few: load, load_avg, util_avg, freq, and capacity.
429 * We define a basic fixed point arithmetic range, and then formalize
430 * all these metrics based on that basic range.
432 # define SCHED_FIXEDPOINT_SHIFT 10
433 # define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)
435 struct io_context; /* See blkdev.h */
438 #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
439 extern void prefetch_stack(struct task_struct *t);
441 static inline void prefetch_stack(struct task_struct *t) { }
444 struct audit_context; /* See audit.c */
446 struct pipe_inode_info;
447 struct uts_namespace;
450 unsigned long weight;
455 * The load_avg/util_avg accumulates an infinite geometric series
456 * (see __update_load_avg() in kernel/sched/fair.c).
458 * [load_avg definition]
460 * load_avg = runnable% * scale_load_down(load)
462 * where runnable% is the time ratio that a sched_entity is runnable.
463 * For cfs_rq, it is the aggregated load_avg of all runnable and
464 * blocked sched_entities.
466 * load_avg may also take frequency scaling into account:
468 * load_avg = runnable% * scale_load_down(load) * freq%
470 * where freq% is the CPU frequency normalized to the highest frequency.
472 * [util_avg definition]
474 * util_avg = running% * SCHED_CAPACITY_SCALE
476 * where running% is the time ratio that a sched_entity is running on
477 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
478 * and blocked sched_entities.
480 * util_avg may also factor frequency scaling and CPU capacity scaling:
482 * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
484 * where freq% is the same as above, and capacity% is the CPU capacity
485 * normalized to the greatest capacity (due to uarch differences, etc).
487 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
488 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
489 * we therefore scale them to as large a range as necessary. This is for
490 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
494 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
495 * with the highest load (=88761), always runnable on a single cfs_rq,
496 * and should not overflow as the number already hits PID_MAX_LIMIT.
498 * For all other cases (including 32-bit kernels), struct load_weight's
499 * weight will overflow first before we do, because:
501 * Max(load_avg) <= Max(load.weight)
503 * Then it is the load_weight's responsibility to consider overflow
507 u64 last_update_time, load_sum;
508 u32 util_sum, period_contrib;
509 unsigned long load_avg, util_avg;
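/*
 * Illustrative sketch (not part of the original header): how a time ratio in
 * [0, 1] maps onto the fixed-point range described above. A sched_entity
 * running 50% of the time yields roughly SCHED_FIXEDPOINT_SCALE/2 == 512.
 * div64_u64() is from <linux/math64.h>.
 */
static inline unsigned long example_util_from_ratio(u64 running_ns, u64 period_ns)
{
	return div64_u64(running_ns << SCHED_FIXEDPOINT_SHIFT, period_ns);
}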
512 #ifdef CONFIG_SCHEDSTATS
513 struct sched_statistics {
523 s64 sum_sleep_runtime;
530 u64 nr_migrations_cold;
531 u64 nr_failed_migrations_affine;
532 u64 nr_failed_migrations_running;
533 u64 nr_failed_migrations_hot;
534 u64 nr_forced_migrations;
538 u64 nr_wakeups_migrate;
539 u64 nr_wakeups_local;
540 u64 nr_wakeups_remote;
541 u64 nr_wakeups_affine;
542 u64 nr_wakeups_affine_attempts;
543 u64 nr_wakeups_passive;
548 struct sched_entity {
549 struct load_weight load; /* for load-balancing */
550 struct rb_node run_node;
551 struct list_head group_node;
555 u64 sum_exec_runtime;
557 u64 prev_sum_exec_runtime;
561 #ifdef CONFIG_SCHEDSTATS
562 struct sched_statistics statistics;
565 #ifdef CONFIG_FAIR_GROUP_SCHED
567 struct sched_entity *parent;
568 /* rq on which this entity is (to be) queued: */
569 struct cfs_rq *cfs_rq;
570 /* rq "owned" by this entity/group: */
576 * Per entity load average tracking.
578 * Put into separate cache line so it does not
579 * collide with read-mostly values above.
581 struct sched_avg avg ____cacheline_aligned_in_smp;
585 struct sched_rt_entity {
586 struct list_head run_list;
587 unsigned long timeout;
588 unsigned long watchdog_stamp;
589 unsigned int time_slice;
590 unsigned short on_rq;
591 unsigned short on_list;
593 struct sched_rt_entity *back;
594 #ifdef CONFIG_RT_GROUP_SCHED
595 struct sched_rt_entity *parent;
596 /* rq on which this entity is (to be) queued: */
598 /* rq "owned" by this entity/group: */
603 struct sched_dl_entity {
604 struct rb_node rb_node;
607 * Original scheduling parameters. Copied here from sched_attr
608 * during sched_setattr(), they will remain the same until
609 * the next sched_setattr().
611 u64 dl_runtime; /* maximum runtime for each instance */
612 u64 dl_deadline; /* relative deadline of each instance */
613 u64 dl_period; /* separation of two instances (period) */
614 u64 dl_bw; /* dl_runtime / dl_deadline */
617 * Actual scheduling parameters. Initialized with the values above,
618 * they are continuously updated during task execution. Note that
619 * the remaining runtime could be < 0 in case we are in overrun.
621 s64 runtime; /* remaining runtime for this instance */
622 u64 deadline; /* absolute deadline for this instance */
623 unsigned int flags; /* specifying the scheduler behaviour */
628 * @dl_throttled tells if we exhausted the runtime. If so, the
629 * task has to wait for a replenishment to be performed at the
630 * next firing of dl_timer.
632 * @dl_boosted tells if we are boosted due to deadline inheritance (DI). If so we are
633 * outside bandwidth enforcement mechanism (but only until we
634 * exit the critical section);
636 * @dl_yielded tells if task gave up the cpu before consuming
637 * all its available runtime during the last job.
639 int dl_throttled, dl_boosted, dl_yielded;
642 * Bandwidth enforcement timer. Each -deadline task has its
643 * own bandwidth to be enforced, thus we need one timer per task.
645 struct hrtimer dl_timer;
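/*
 * Illustrative sketch (not part of the original header): filling a struct
 * sched_attr with the three parameters above for sched_setattr(). The
 * admission test requires runtime <= deadline <= period, all in nanoseconds
 * (memset() and NSEC_PER_MSEC are assumed available from <linux/string.h>
 * and <linux/time64.h>).
 */
static inline void example_fill_dl_attr(struct sched_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size           = sizeof(*attr);
	attr->sched_policy   = SCHED_DEADLINE;
	attr->sched_runtime  = 10 * NSEC_PER_MSEC;	/* budget per period */
	attr->sched_deadline = 30 * NSEC_PER_MSEC;	/* relative deadline */
	attr->sched_period   = 100 * NSEC_PER_MSEC;	/* activation period */
}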
653 u8 pad; /* Otherwise the compiler can store garbage here. */
655 u32 s; /* Set of bits. */
659 enum perf_event_task_context {
660 perf_invalid_context = -1,
663 perf_nr_task_contexts,
667 struct wake_q_node *next;
670 /* Track pages that require TLB flushes */
671 struct tlbflush_unmap_batch {
673 * Each bit set is a CPU that potentially has a TLB entry for one of
674 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
676 struct cpumask cpumask;
678 /* True if any bit in cpumask is set */
682 * If true then the PTE was dirty when unmapped. The entry must be
683 * flushed before IO is initiated or a stale TLB entry potentially
684 * allows an update without redirtying the page.
690 #ifdef CONFIG_THREAD_INFO_IN_TASK
692 * For reasons of header soup (see current_thread_info()), this
693 * must be the first element of task_struct.
695 struct thread_info thread_info;
697 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
700 unsigned int flags; /* per process flags, defined below */
704 struct llist_node wake_entry;
706 #ifdef CONFIG_THREAD_INFO_IN_TASK
707 unsigned int cpu; /* current CPU */
709 unsigned int wakee_flips;
710 unsigned long wakee_flip_decay_ts;
711 struct task_struct *last_wakee;
717 int prio, static_prio, normal_prio;
718 unsigned int rt_priority;
719 const struct sched_class *sched_class;
720 struct sched_entity se;
721 struct sched_rt_entity rt;
722 #ifdef CONFIG_CGROUP_SCHED
723 struct task_group *sched_task_group;
725 struct sched_dl_entity dl;
727 #ifdef CONFIG_PREEMPT_NOTIFIERS
728 /* list of struct preempt_notifier: */
729 struct hlist_head preempt_notifiers;
732 #ifdef CONFIG_BLK_DEV_IO_TRACE
733 unsigned int btrace_seq;
738 cpumask_t cpus_allowed;
740 #ifdef CONFIG_PREEMPT_RCU
741 int rcu_read_lock_nesting;
742 union rcu_special rcu_read_unlock_special;
743 struct list_head rcu_node_entry;
744 struct rcu_node *rcu_blocked_node;
745 #endif /* #ifdef CONFIG_PREEMPT_RCU */
746 #ifdef CONFIG_TASKS_RCU
747 unsigned long rcu_tasks_nvcsw;
748 bool rcu_tasks_holdout;
749 struct list_head rcu_tasks_holdout_list;
750 int rcu_tasks_idle_cpu;
751 #endif /* #ifdef CONFIG_TASKS_RCU */
753 #ifdef CONFIG_SCHED_INFO
754 struct sched_info sched_info;
757 struct list_head tasks;
759 struct plist_node pushable_tasks;
760 struct rb_node pushable_dl_tasks;
763 struct mm_struct *mm, *active_mm;
765 /* Per-thread vma caching: */
766 struct vmacache vmacache;
768 #if defined(SPLIT_RSS_COUNTING)
769 struct task_rss_stat rss_stat;
773 int exit_code, exit_signal;
774 int pdeath_signal; /* The signal sent when the parent dies */
775 unsigned long jobctl; /* JOBCTL_*, siglock protected */
777 /* Used for emulating ABI behavior of previous Linux versions */
778 unsigned int personality;
780 /* scheduler bits, serialized by scheduler locks */
781 unsigned sched_reset_on_fork:1;
782 unsigned sched_contributes_to_load:1;
783 unsigned sched_migrated:1;
784 unsigned sched_remote_wakeup:1;
785 unsigned :0; /* force alignment to the next boundary */
787 /* unserialized, strictly 'current' */
788 unsigned in_execve:1; /* bit to tell LSMs we're in execve */
789 unsigned in_iowait:1;
790 #if !defined(TIF_RESTORE_SIGMASK)
791 unsigned restore_sigmask:1;
794 unsigned memcg_may_oom:1;
796 unsigned memcg_kmem_skip_account:1;
799 #ifdef CONFIG_COMPAT_BRK
800 unsigned brk_randomized:1;
803 unsigned long atomic_flags; /* Flags needing atomic access. */
805 struct restart_block restart_block;
810 #ifdef CONFIG_CC_STACKPROTECTOR
811 /* Canary value for the -fstack-protector gcc feature */
812 unsigned long stack_canary;
815 * pointers to (original) parent process, youngest child, younger sibling,
816 * older sibling, respectively. (p->father can be replaced with
817 * p->real_parent->pid)
819 struct task_struct __rcu *real_parent; /* real parent process */
820 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
822 * children/sibling forms the list of my natural children
824 struct list_head children; /* list of my children */
825 struct list_head sibling; /* linkage in my parent's children list */
826 struct task_struct *group_leader; /* threadgroup leader */
829 * ptraced is the list of tasks this task is using ptrace on.
830 * This includes both natural children and PTRACE_ATTACH targets.
831 * p->ptrace_entry is p's link on the p->parent->ptraced list.
833 struct list_head ptraced;
834 struct list_head ptrace_entry;
836 /* PID/PID hash table linkage. */
837 struct pid_link pids[PIDTYPE_MAX];
838 struct list_head thread_group;
839 struct list_head thread_node;
841 struct completion *vfork_done; /* for vfork() */
842 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
843 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
846 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
847 u64 utimescaled, stimescaled;
850 struct prev_cputime prev_cputime;
851 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
852 seqcount_t vtime_seqcount;
853 unsigned long long vtime_snap;
855 /* Task is sleeping or running in a CPU with VTIME inactive */
857 /* Task runs in userspace in a CPU with VTIME active */
859 /* Task runs in kernelspace in a CPU with VTIME active */
864 #ifdef CONFIG_NO_HZ_FULL
865 atomic_t tick_dep_mask;
867 unsigned long nvcsw, nivcsw; /* context switch counts */
868 u64 start_time; /* monotonic time in nsec */
869 u64 real_start_time; /* boot based time in nsec */
870 /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
871 unsigned long min_flt, maj_flt;
873 #ifdef CONFIG_POSIX_TIMERS
874 struct task_cputime cputime_expires;
875 struct list_head cpu_timers[3];
878 /* process credentials */
879 const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
880 const struct cred __rcu *real_cred; /* objective and real subjective task
881 * credentials (COW) */
882 const struct cred __rcu *cred; /* effective (overridable) subjective task
883 * credentials (COW) */
884 char comm[TASK_COMM_LEN]; /* executable name excluding path
885 - access with [gs]et_task_comm (which lock it with task_lock())
887 - initialized normally by setup_new_exec */
888 /* file system info */
889 struct nameidata *nameidata;
890 #ifdef CONFIG_SYSVIPC
892 struct sysv_sem sysvsem;
893 struct sysv_shm sysvshm;
895 #ifdef CONFIG_DETECT_HUNG_TASK
896 /* hung task detection */
897 unsigned long last_switch_count;
899 /* filesystem information */
900 struct fs_struct *fs;
901 /* open file information */
902 struct files_struct *files;
904 struct nsproxy *nsproxy;
905 /* signal handlers */
906 struct signal_struct *signal;
907 struct sighand_struct *sighand;
909 sigset_t blocked, real_blocked;
910 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
911 struct sigpending pending;
913 unsigned long sas_ss_sp;
915 unsigned sas_ss_flags;
917 struct callback_head *task_works;
919 struct audit_context *audit_context;
920 #ifdef CONFIG_AUDITSYSCALL
922 unsigned int sessionid;
924 struct seccomp seccomp;
926 /* Thread group tracking */
929 /* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
931 spinlock_t alloc_lock;
933 /* Protection of the PI data structures: */
934 raw_spinlock_t pi_lock;
936 struct wake_q_node wake_q;
938 #ifdef CONFIG_RT_MUTEXES
939 /* PI waiters blocked on a rt_mutex held by this task */
940 struct rb_root pi_waiters;
941 struct rb_node *pi_waiters_leftmost;
942 /* Deadlock detection and priority inheritance handling */
943 struct rt_mutex_waiter *pi_blocked_on;
946 #ifdef CONFIG_DEBUG_MUTEXES
947 /* mutex deadlock detection */
948 struct mutex_waiter *blocked_on;
950 #ifdef CONFIG_TRACE_IRQFLAGS
951 unsigned int irq_events;
952 unsigned long hardirq_enable_ip;
953 unsigned long hardirq_disable_ip;
954 unsigned int hardirq_enable_event;
955 unsigned int hardirq_disable_event;
956 int hardirqs_enabled;
958 unsigned long softirq_disable_ip;
959 unsigned long softirq_enable_ip;
960 unsigned int softirq_disable_event;
961 unsigned int softirq_enable_event;
962 int softirqs_enabled;
965 #ifdef CONFIG_LOCKDEP
966 # define MAX_LOCK_DEPTH 48UL
969 unsigned int lockdep_recursion;
970 struct held_lock held_locks[MAX_LOCK_DEPTH];
971 gfp_t lockdep_reclaim_gfp;
974 unsigned int in_ubsan;
977 /* journalling filesystem info */
980 /* stacked block device info */
981 struct bio_list *bio_list;
985 struct blk_plug *plug;
989 struct reclaim_state *reclaim_state;
991 struct backing_dev_info *backing_dev_info;
993 struct io_context *io_context;
995 unsigned long ptrace_message;
996 siginfo_t *last_siginfo; /* For ptrace use. */
997 struct task_io_accounting ioac;
998 #if defined(CONFIG_TASK_XACCT)
999 u64 acct_rss_mem1; /* accumulated rss usage */
1000 u64 acct_vm_mem1; /* accumulated virtual memory usage */
1001 u64 acct_timexpd; /* stime + utime since last update */
1003 #ifdef CONFIG_CPUSETS
1004 nodemask_t mems_allowed; /* Protected by alloc_lock */
1005 seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
1006 int cpuset_mem_spread_rotor;
1007 int cpuset_slab_spread_rotor;
1009 #ifdef CONFIG_CGROUPS
1010 /* Control Group info protected by css_set_lock */
1011 struct css_set __rcu *cgroups;
1012 /* cg_list protected by css_set_lock and tsk->alloc_lock */
1013 struct list_head cg_list;
1015 #ifdef CONFIG_INTEL_RDT_A
1019 struct robust_list_head __user *robust_list;
1020 #ifdef CONFIG_COMPAT
1021 struct compat_robust_list_head __user *compat_robust_list;
1023 struct list_head pi_state_list;
1024 struct futex_pi_state *pi_state_cache;
1026 #ifdef CONFIG_PERF_EVENTS
1027 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1028 struct mutex perf_event_mutex;
1029 struct list_head perf_event_list;
1031 #ifdef CONFIG_DEBUG_PREEMPT
1032 unsigned long preempt_disable_ip;
1035 struct mempolicy *mempolicy; /* Protected by alloc_lock */
1037 short pref_node_fork;
1039 #ifdef CONFIG_NUMA_BALANCING
1041 unsigned int numa_scan_period;
1042 unsigned int numa_scan_period_max;
1043 int numa_preferred_nid;
1044 unsigned long numa_migrate_retry;
1045 u64 node_stamp; /* migration stamp */
1046 u64 last_task_numa_placement;
1047 u64 last_sum_exec_runtime;
1048 struct callback_head numa_work;
1050 struct list_head numa_entry;
1051 struct numa_group *numa_group;
1054 * numa_faults is an array split into four regions:
1055 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1056 * in this precise order.
1058 * faults_memory: Exponential decaying average of faults on a per-node
1059 * basis. Scheduling placement decisions are made based on these
1060 * counts. The values remain static for the duration of a PTE scan.
1061 * faults_cpu: Track the nodes the process was running on when a NUMA
1062 * hinting fault was incurred.
1063 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1064 * during the current scan window. When the scan completes, the counts
1065 * in faults_memory and faults_cpu decay and these values are copied.
1067 unsigned long *numa_faults;
1068 unsigned long total_numa_faults;
1071 * numa_faults_locality tracks if faults recorded during the last
1072 * scan window were remote/local or failed to migrate. The task scan
1073 * period is adapted based on the locality of the faults with different
1074 * weights depending on whether they were shared or private faults
1076 unsigned long numa_faults_locality[3];
1078 unsigned long numa_pages_migrated;
1079 #endif /* CONFIG_NUMA_BALANCING */
1081 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1082 struct tlbflush_unmap_batch tlb_ubc;
1085 struct rcu_head rcu;
1088 * cache last used pipe for splice
1090 struct pipe_inode_info *splice_pipe;
1092 struct page_frag task_frag;
1094 #ifdef CONFIG_TASK_DELAY_ACCT
1095 struct task_delay_info *delays;
1098 #ifdef CONFIG_FAULT_INJECTION
1102 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1103 * balance_dirty_pages() for some dirty throttling pause
1106 int nr_dirtied_pause;
1107 unsigned long dirty_paused_when; /* start of a write-and-pause period */
1109 #ifdef CONFIG_LATENCYTOP
1110 int latency_record_count;
1111 struct latency_record latency_record[LT_SAVECOUNT];
1114 * time slack values; these are used to round up poll() and
1115 * select() etc timeout values. These are in nanoseconds.
1118 u64 default_timer_slack_ns;
1121 unsigned int kasan_depth;
1123 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1124 /* Index of current stored address in ret_stack */
1126 /* Stack of return addresses for return function tracing */
1127 struct ftrace_ret_stack *ret_stack;
1128 /* time stamp for last schedule */
1129 unsigned long long ftrace_timestamp;
1131 * Number of functions that haven't been traced
1132 * because of depth overrun.
1134 atomic_t trace_overrun;
1135 /* Pause for the tracing */
1136 atomic_t tracing_graph_pause;
1138 #ifdef CONFIG_TRACING
1139 /* state flags for use by tracers */
1140 unsigned long trace;
1141 /* bitmask and counter of trace recursion */
1142 unsigned long trace_recursion;
1143 #endif /* CONFIG_TRACING */
1145 /* Coverage collection mode enabled for this task (0 if disabled). */
1146 enum kcov_mode kcov_mode;
1147 /* Size of the kcov_area. */
1149 /* Buffer for coverage collection. */
1151 /* kcov descriptor wired with this task or NULL. */
1155 struct mem_cgroup *memcg_in_oom;
1156 gfp_t memcg_oom_gfp_mask;
1157 int memcg_oom_order;
1159 /* number of pages to reclaim on returning to userland */
1160 unsigned int memcg_nr_pages_over_high;
1162 #ifdef CONFIG_UPROBES
1163 struct uprobe_task *utask;
1165 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1166 unsigned int sequential_io;
1167 unsigned int sequential_io_avg;
1169 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1170 unsigned long task_state_change;
1172 int pagefault_disabled;
1174 struct task_struct *oom_reaper_list;
1176 #ifdef CONFIG_VMAP_STACK
1177 struct vm_struct *stack_vm_area;
1179 #ifdef CONFIG_THREAD_INFO_IN_TASK
1180 /* A live task holds one reference. */
1181 atomic_t stack_refcount;
1183 /* CPU-specific state of this task */
1184 struct thread_struct thread;
1186 * WARNING: on x86, 'thread_struct' contains a variable-sized
1187 * structure. It *MUST* be at the end of 'task_struct'.
1189 * Do not put anything below here!
1193 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1194 extern int arch_task_struct_size __read_mostly;
1196 # define arch_task_struct_size (sizeof(struct task_struct))
1199 #ifdef CONFIG_VMAP_STACK
1200 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1202 return t->stack_vm_area;
1205 static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1211 static inline struct pid *task_pid(struct task_struct *task)
1213 return task->pids[PIDTYPE_PID].pid;
1216 static inline struct pid *task_tgid(struct task_struct *task)
1218 return task->group_leader->pids[PIDTYPE_PID].pid;
1222 * Without tasklist or rcu lock it is not safe to dereference
1223 * the result of task_pgrp/task_session even if task == current;
1224 * we can race with another thread doing sys_setsid/sys_setpgid.
1226 static inline struct pid *task_pgrp(struct task_struct *task)
1228 return task->group_leader->pids[PIDTYPE_PGID].pid;
1231 static inline struct pid *task_session(struct task_struct *task)
1233 return task->group_leader->pids[PIDTYPE_SID].pid;
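/*
 * Illustrative sketch (not part of the original header): reading a stable
 * pgrp id under rcu_read_lock(), per the locking comment above; pid_vnr()
 * comes from <linux/pid.h>.
 */
static inline pid_t example_read_pgrp(struct task_struct *task)
{
	pid_t nr;

	rcu_read_lock();
	nr = pid_vnr(task_pgrp(task));
	rcu_read_unlock();

	return nr;
}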
1236 struct pid_namespace;
1239 * the helpers to get the task's different pids as they are seen
1240 * from various namespaces
1242 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
1243 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1245 * task_xid_nr_ns() : id seen from the ns specified;
1247 * set_task_vxid() : assigns a virtual id to a task;
1249 * see also pid_nr() etc in include/linux/pid.h
1251 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1252 struct pid_namespace *ns);
1254 static inline pid_t task_pid_nr(struct task_struct *tsk)
1259 static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1260 struct pid_namespace *ns)
1262 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1265 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1267 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1271 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1276 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1278 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1280 return pid_vnr(task_tgid(tsk));
1284 static inline int pid_alive(const struct task_struct *p);
1285 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1291 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1297 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1299 return task_ppid_nr_ns(tsk, &init_pid_ns);
1302 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1303 struct pid_namespace *ns)
1305 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1308 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1310 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1314 static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1315 struct pid_namespace *ns)
1317 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1320 static inline pid_t task_session_vnr(struct task_struct *tsk)
1322 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1325 /* obsolete, do not use */
1326 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1328 return task_pgrp_nr_ns(tsk, &init_pid_ns);
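/*
 * Illustrative sketch (not part of the original header): the same task
 * reported with its global id (as seen from the init namespace) and its
 * virtual id (as seen from its own pid namespace), using the helpers above.
 */
static inline void example_report_ids(struct task_struct *tsk)
{
	pr_info("pid %d (global) / %d (own ns), tgid %d / %d\n",
		task_pid_nr(tsk), task_pid_vnr(tsk),
		task_tgid_nr(tsk), task_tgid_vnr(tsk));
}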
1332 * pid_alive - check that a task structure is not stale
1333 * @p: Task structure to be checked.
1335 * Test if a process is not yet dead (at most zombie state)
1336 * If pid_alive fails, then pointers within the task structure
1337 * can be stale and must not be dereferenced.
1339 * Return: 1 if the process is alive. 0 otherwise.
1341 static inline int pid_alive(const struct task_struct *p)
1343 return p->pids[PIDTYPE_PID].pid != NULL;
1347 * is_global_init - check if a task structure is init. Since init
1348 * is free to have sub-threads we need to check tgid.
1349 * @tsk: Task structure to be checked.
1351 * Check if a task structure is the first user space task the kernel created.
1353 * Return: 1 if the task structure is init. 0 otherwise.
1355 static inline int is_global_init(struct task_struct *tsk)
1357 return task_tgid_nr(tsk) == 1;
1360 extern struct pid *cad_pid;
1362 extern void free_task(struct task_struct *tsk);
1363 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1365 extern void __put_task_struct(struct task_struct *t);
1367 static inline void put_task_struct(struct task_struct *t)
1369 if (atomic_dec_and_test(&t->usage))
1370 __put_task_struct(t);
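/*
 * Illustrative sketch (not part of the original header): pinning a
 * task_struct so it stays valid across a sleep, then dropping the reference.
 */
static inline void example_use_task(struct task_struct *t)
{
	get_task_struct(t);
	/* ... t remains valid here even if the task exits and is reaped ... */
	put_task_struct(t);
}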
1373 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
1374 struct task_struct *try_get_task_struct(struct task_struct **ptask);
1376 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1377 extern void task_cputime(struct task_struct *t,
1378 u64 *utime, u64 *stime);
1379 extern u64 task_gtime(struct task_struct *t);
1381 static inline void task_cputime(struct task_struct *t,
1382 u64 *utime, u64 *stime)
1388 static inline u64 task_gtime(struct task_struct *t)
1394 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
1395 static inline void task_cputime_scaled(struct task_struct *t,
1399 *utimescaled = t->utimescaled;
1400 *stimescaled = t->stimescaled;
1403 static inline void task_cputime_scaled(struct task_struct *t,
1407 task_cputime(t, utimescaled, stimescaled);
1411 extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
1412 extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
1417 #define PF_IDLE 0x00000002 /* I am an IDLE thread */
1418 #define PF_EXITING 0x00000004 /* getting shut down */
1419 #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1420 #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1421 #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1422 #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1423 #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1424 #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1425 #define PF_DUMPCORE 0x00000200 /* dumped core */
1426 #define PF_SIGNALED 0x00000400 /* killed by a signal */
1427 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
1428 #define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1429 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1430 #define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1431 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1432 #define PF_FROZEN 0x00010000 /* frozen for system suspend */
1433 #define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1434 #define PF_KSWAPD 0x00040000 /* I am kswapd */
1435 #define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
1436 #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1437 #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1438 #define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1439 #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
1440 #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
1441 #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1442 #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1443 #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1444 #define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
1447 * Only the _current_ task can read/write to tsk->flags, but other
1448 * tasks can access tsk->flags in readonly mode for example
1449 * with tsk_used_math (like during threaded core dumping).
1450 * There is however an exception to this rule during ptrace
1451 * or during fork: the ptracer task is allowed to write to the
1452 * child->flags of its traced child (same goes for fork, the parent
1453 * can write to the child->flags), because we're guaranteed the
1454 * child is not running and in turn not changing child->flags
1455 * at the same time the parent does it.
1457 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1458 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1459 #define clear_used_math() clear_stopped_child_used_math(current)
1460 #define set_used_math() set_stopped_child_used_math(current)
1461 #define conditional_stopped_child_used_math(condition, child) \
1462 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1463 #define conditional_used_math(condition) \
1464 conditional_stopped_child_used_math(condition, current)
1465 #define copy_to_stopped_child_used_math(child) \
1466 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1467 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1468 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1469 #define used_math() tsk_used_math(current)
1471 /* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
1472 * __GFP_FS is also cleared as it implies __GFP_IO.
1474 static inline gfp_t memalloc_noio_flags(gfp_t flags)
1476 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1477 flags &= ~(__GFP_IO | __GFP_FS);
1481 static inline unsigned int memalloc_noio_save(void)
1483 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
1484 current->flags |= PF_MEMALLOC_NOIO;
1488 static inline void memalloc_noio_restore(unsigned int flags)
1490 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
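/*
 * Illustrative sketch (not part of the original header): bracketing a region
 * whose allocations must not recurse into I/O, using the helpers above.
 */
static inline void example_noio_region(void)
{
	unsigned int noio_flags = memalloc_noio_save();

	/* ... allocations here get __GFP_IO and __GFP_FS masked off ... */

	memalloc_noio_restore(noio_flags);
}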
1493 /* Per-process atomic flags. */
1494 #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
1495 #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1496 #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
1497 #define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
1500 #define TASK_PFA_TEST(name, func) \
1501 static inline bool task_##func(struct task_struct *p) \
1502 { return test_bit(PFA_##name, &p->atomic_flags); }
1503 #define TASK_PFA_SET(name, func) \
1504 static inline void task_set_##func(struct task_struct *p) \
1505 { set_bit(PFA_##name, &p->atomic_flags); }
1506 #define TASK_PFA_CLEAR(name, func) \
1507 static inline void task_clear_##func(struct task_struct *p) \
1508 { clear_bit(PFA_##name, &p->atomic_flags); }
1510 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1511 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1513 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1514 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1515 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1517 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1518 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1519 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1521 TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
1522 TASK_PFA_SET(LMK_WAITING, lmk_waiting)
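/*
 * Illustrative sketch (not part of the original header): the generated
 * helpers read and write individual bits of p->atomic_flags.
 */
static inline bool example_set_and_test_no_new_privs(struct task_struct *p)
{
	task_set_no_new_privs(p);
	return task_no_new_privs(p);
}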
1524 static inline void rcu_copy_process(struct task_struct *p)
1526 #ifdef CONFIG_PREEMPT_RCU
1527 p->rcu_read_lock_nesting = 0;
1528 p->rcu_read_unlock_special.s = 0;
1529 p->rcu_blocked_node = NULL;
1530 INIT_LIST_HEAD(&p->rcu_node_entry);
1531 #endif /* #ifdef CONFIG_PREEMPT_RCU */
1532 #ifdef CONFIG_TASKS_RCU
1533 p->rcu_tasks_holdout = false;
1534 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
1535 p->rcu_tasks_idle_cpu = -1;
1536 #endif /* #ifdef CONFIG_TASKS_RCU */
1539 static inline void tsk_restore_flags(struct task_struct *task,
1540 unsigned long orig_flags, unsigned long flags)
1542 task->flags &= ~flags;
1543 task->flags |= orig_flags & flags;
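/*
 * Illustrative sketch (not part of the original header): temporarily setting
 * a PF_ flag and undoing only that bit with tsk_restore_flags().
 */
static inline void example_swapwrite_section(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_SWAPWRITE;
	/* ... writeback that is allowed to write to swap ... */
	tsk_restore_flags(current, pflags, PF_SWAPWRITE);
}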
1546 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
1547 const struct cpumask *trial);
1548 extern int task_can_attach(struct task_struct *p,
1549 const struct cpumask *cs_cpus_allowed);
1551 extern void do_set_cpus_allowed(struct task_struct *p,
1552 const struct cpumask *new_mask);
1554 extern int set_cpus_allowed_ptr(struct task_struct *p,
1555 const struct cpumask *new_mask);
1557 static inline void do_set_cpus_allowed(struct task_struct *p,
1558 const struct cpumask *new_mask)
1561 static inline int set_cpus_allowed_ptr(struct task_struct *p,
1562 const struct cpumask *new_mask)
1564 if (!cpumask_test_cpu(0, new_mask))
1570 #ifdef CONFIG_NO_HZ_COMMON
1571 void calc_load_enter_idle(void);
1572 void calc_load_exit_idle(void);
1574 static inline void calc_load_enter_idle(void) { }
1575 static inline void calc_load_exit_idle(void) { }
1576 #endif /* CONFIG_NO_HZ_COMMON */
1578 #ifndef cpu_relax_yield
1579 #define cpu_relax_yield() cpu_relax()
1582 extern unsigned long long
1583 task_sched_runtime(struct task_struct *task);
1585 /* sched_exec is called by processes performing an exec */
1587 extern void sched_exec(void);
1589 #define sched_exec() {}
1592 #ifdef CONFIG_HOTPLUG_CPU
1593 extern void idle_task_exit(void);
1595 static inline void idle_task_exit(void) {}
1598 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
1599 extern void wake_up_nohz_cpu(int cpu);
1601 static inline void wake_up_nohz_cpu(int cpu) { }
1604 #ifdef CONFIG_NO_HZ_FULL
1605 extern u64 scheduler_tick_max_deferment(void);
1608 extern int yield_to(struct task_struct *p, bool preempt);
1609 extern void set_user_nice(struct task_struct *p, long nice);
1610 extern int task_prio(const struct task_struct *p);
1612 * task_nice - return the nice value of a given task.
1613 * @p: the task in question.
1615 * Return: The nice value [ -20 ... 0 ... 19 ].
1617 static inline int task_nice(const struct task_struct *p)
1619 return PRIO_TO_NICE((p)->static_prio);
1621 extern int can_nice(const struct task_struct *p, const int nice);
1622 extern int task_curr(const struct task_struct *p);
1623 extern int idle_cpu(int cpu);
1624 extern int sched_setscheduler(struct task_struct *, int,
1625 const struct sched_param *);
1626 extern int sched_setscheduler_nocheck(struct task_struct *, int,
1627 const struct sched_param *);
1628 extern int sched_setattr(struct task_struct *,
1629 const struct sched_attr *);
1630 extern struct task_struct *idle_task(int cpu);
1632 * is_idle_task - is the specified task an idle task?
1633 * @p: the task in question.
1635 * Return: 1 if @p is an idle task. 0 otherwise.
1637 static inline bool is_idle_task(const struct task_struct *p)
1639 return !!(p->flags & PF_IDLE);
1641 extern struct task_struct *curr_task(int cpu);
1642 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1646 union thread_union {
1647 #ifndef CONFIG_THREAD_INFO_IN_TASK
1648 struct thread_info thread_info;
1650 unsigned long stack[THREAD_SIZE/sizeof(long)];
1653 #ifndef __HAVE_ARCH_KSTACK_END
1654 static inline int kstack_end(void *addr)
1656 /* Reliable end of stack detection:
1657 * Some APM bios versions misalign the stack
1659 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1663 extern union thread_union init_thread_union;
1664 extern struct task_struct init_task;
1666 extern struct pid_namespace init_pid_ns;
1669 * find a task by one of its numerical ids
1671 * find_task_by_pid_ns():
1672 * finds a task by its pid in the specified namespace
1673 * find_task_by_vpid():
1674 * finds a task by its virtual pid
1676 * see also find_vpid() etc in include/linux/pid.h
1679 extern struct task_struct *find_task_by_vpid(pid_t nr);
1680 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1681 struct pid_namespace *ns);
1683 #include <asm/current.h>
1685 extern void xtime_update(unsigned long ticks);
1687 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1688 extern int wake_up_process(struct task_struct *tsk);
1689 extern void wake_up_new_task(struct task_struct *tsk);
1691 extern void kick_process(struct task_struct *tsk);
1693 static inline void kick_process(struct task_struct *tsk) { }
1695 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
1696 extern void sched_dead(struct task_struct *p);
1698 extern void proc_caches_init(void);
1700 extern void release_task(struct task_struct * p);
1702 #ifdef CONFIG_HAVE_COPY_THREAD_TLS
1703 extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
1704 struct task_struct *, unsigned long);
1706 extern int copy_thread(unsigned long, unsigned long, unsigned long,
1707 struct task_struct *);
1709 /* Architectures that haven't opted into copy_thread_tls get the tls argument
1710 * via pt_regs, so ignore the tls argument passed via C. */
1711 static inline int copy_thread_tls(
1712 unsigned long clone_flags, unsigned long sp, unsigned long arg,
1713 struct task_struct *p, unsigned long tls)
1715 return copy_thread(clone_flags, sp, arg, p);
1718 extern void flush_thread(void);
1720 #ifdef CONFIG_HAVE_EXIT_THREAD
1721 extern void exit_thread(struct task_struct *tsk);
1723 static inline void exit_thread(struct task_struct *tsk)
1728 extern void exit_files(struct task_struct *);
1730 extern void exit_itimers(struct signal_struct *);
1732 extern void do_group_exit(int);
1734 extern int do_execve(struct filename *,
1735 const char __user * const __user *,
1736 const char __user * const __user *);
1737 extern int do_execveat(int, struct filename *,
1738 const char __user * const __user *,
1739 const char __user * const __user *,
1741 extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
1742 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
1743 struct task_struct *fork_idle(int);
1744 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
1746 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1747 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1749 __set_task_comm(tsk, from, false);
1751 extern char *get_task_comm(char *to, struct task_struct *tsk);
1754 void scheduler_ipi(void);
1755 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1757 static inline void scheduler_ipi(void) { }
1758 static inline unsigned long wait_task_inactive(struct task_struct *p,
1766 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
1767 * subscriptions and synchronises with wait4(). Also used in procfs. Also
1768 * pins the final release of task.io_context. Also protects ->cpuset and
1769 * ->cgroup.subsys[]. And ->vfork_done.
1771 * Nests both inside and outside of read_lock(&tasklist_lock).
1772 * It must not be nested with write_lock_irq(&tasklist_lock),
1773 * neither inside nor outside.
1775 static inline void task_lock(struct task_struct *p)
1777 spin_lock(&p->alloc_lock);
1780 static inline void task_unlock(struct task_struct *p)
1782 spin_unlock(&p->alloc_lock);
1785 #ifdef CONFIG_THREAD_INFO_IN_TASK
1787 static inline struct thread_info *task_thread_info(struct task_struct *task)
1789 return &task->thread_info;
1793 * When accessing the stack of a non-current task that might exit, use
1794 * try_get_task_stack() instead. task_stack_page will return a pointer
1795 * that could get freed out from under you.
1797 static inline void *task_stack_page(const struct task_struct *task)
1802 #define setup_thread_stack(new,old) do { } while(0)
1804 static inline unsigned long *end_of_stack(const struct task_struct *task)
1809 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1811 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
1812 #define task_stack_page(task) ((void *)(task)->stack)
1814 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1816 *task_thread_info(p) = *task_thread_info(org);
1817 task_thread_info(p)->task = p;
1821 * Return the address of the last usable long on the stack.
1823 * When the stack grows down, this is just above the thread
1824 * info struct. Going any lower will corrupt the threadinfo.
1826 * When the stack grows up, this is the highest address.
1827 * Beyond that position, we corrupt data on the next page.
1829 static inline unsigned long *end_of_stack(struct task_struct *p)
1831 #ifdef CONFIG_STACK_GROWSUP
1832 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
1834 return (unsigned long *)(task_thread_info(p) + 1);
1840 #ifdef CONFIG_THREAD_INFO_IN_TASK
1841 static inline void *try_get_task_stack(struct task_struct *tsk)
1843 return atomic_inc_not_zero(&tsk->stack_refcount) ?
1844 task_stack_page(tsk) : NULL;
1847 extern void put_task_stack(struct task_struct *tsk);
1849 static inline void *try_get_task_stack(struct task_struct *tsk)
1851 return task_stack_page(tsk);
1854 static inline void put_task_stack(struct task_struct *tsk) {}
1857 #define task_stack_end_corrupted(task) \
1858 (*(end_of_stack(task)) != STACK_END_MAGIC)
1860 static inline int object_is_on_stack(void *obj)
1862 void *stack = task_stack_page(current);
1864 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
1867 extern void thread_stack_cache_init(void);
1869 #ifdef CONFIG_DEBUG_STACK_USAGE
1870 static inline unsigned long stack_not_used(struct task_struct *p)
1872 unsigned long *n = end_of_stack(p);
1874 do { /* Skip over canary */
1875 # ifdef CONFIG_STACK_GROWSUP
1882 # ifdef CONFIG_STACK_GROWSUP
1883 return (unsigned long)end_of_stack(p) - (unsigned long)n;
1885 return (unsigned long)n - (unsigned long)end_of_stack(p);
1889 extern void set_task_stack_end_magic(struct task_struct *tsk);
1891 /* set thread flags in other task's structures
1892 * - see asm/thread_info.h for TIF_xxxx flags available
1894 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1896 set_ti_thread_flag(task_thread_info(tsk), flag);
1899 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1901 clear_ti_thread_flag(task_thread_info(tsk), flag);
1904 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1906 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1909 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1911 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1914 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1916 return test_ti_thread_flag(task_thread_info(tsk), flag);
1919 static inline void set_tsk_need_resched(struct task_struct *tsk)
1921 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1924 static inline void clear_tsk_need_resched(struct task_struct *tsk)
1926 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1929 static inline int test_tsk_need_resched(struct task_struct *tsk)
1931 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1934 static inline int restart_syscall(void)
1936 set_tsk_thread_flag(current, TIF_SIGPENDING);
1937 return -ERESTARTNOINTR;
1940 static inline int signal_pending(struct task_struct *p)
1942 return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
1945 static inline int __fatal_signal_pending(struct task_struct *p)
1947 return unlikely(sigismember(&p->pending.signal, SIGKILL));
1950 static inline int fatal_signal_pending(struct task_struct *p)
1952 return signal_pending(p) && __fatal_signal_pending(p);
1955 static inline int signal_pending_state(long state, struct task_struct *p)
1957 if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
1959 if (!signal_pending(p))
1962 return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
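/*
 * Illustrative sketch (not part of the original header): a wait loop that
 * gives up as soon as a fatal signal is pending. 'done' is a hypothetical
 * completion flag; HZ and EINTR are assumed from the usual kernel headers.
 */
static inline int example_wait_killable(bool *done)
{
	while (!*done) {
		if (fatal_signal_pending(current))
			return -EINTR;
		schedule_timeout_killable(HZ / 10);
	}
	return 0;
}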
1966 * cond_resched() and cond_resched_lock(): latency reduction via
1967 * explicit rescheduling in places that are safe. The return
1968 * value indicates whether a reschedule was done in fact.
1969 * cond_resched_lock() will drop the spinlock before scheduling,
1970 * cond_resched_softirq() will enable bhs before scheduling.
1972 #ifndef CONFIG_PREEMPT
1973 extern int _cond_resched(void);
1975 static inline int _cond_resched(void) { return 0; }
1978 #define cond_resched() ({ \
1979 ___might_sleep(__FILE__, __LINE__, 0); \
1983 extern int __cond_resched_lock(spinlock_t *lock);
1985 #define cond_resched_lock(lock) ({ \
1986 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1987 __cond_resched_lock(lock); \
1990 extern int __cond_resched_softirq(void);
1992 #define cond_resched_softirq() ({ \
1993 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
1994 __cond_resched_softirq(); \
1997 static inline void cond_resched_rcu(void)
1999 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2007 * Does a critical section need to be broken due to another
2008 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
2009 * but a general need for low latency)
2011 static inline int spin_needbreak(spinlock_t *lock)
2013 #ifdef CONFIG_PREEMPT
2014 return spin_is_contended(lock);
2020 static __always_inline bool need_resched(void)
2022 return unlikely(tif_need_resched());
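/*
 * Illustrative sketch (not part of the original header): breaking a long
 * kernel-side loop into preemption-friendly chunks with cond_resched().
 */
static inline void example_process_many(unsigned long nr_items)
{
	unsigned long i;

	for (i = 0; i < nr_items; i++) {
		/* ... one bounded unit of work ... */
		cond_resched();
	}
}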
2026 * Thread group CPU time accounting.
2028 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2029 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2032 * Reevaluate whether the task has signals pending delivery.
2033 * Wake the task if so.
2034 * This is required every time the blocked sigset_t changes.
2035 * callers must hold sighand->siglock.
2037 extern void recalc_sigpending_and_wake(struct task_struct *t);
2038 extern void recalc_sigpending(void);
2040 extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2042 static inline void signal_wake_up(struct task_struct *t, bool resume)
2044 signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2046 static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2048 signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2052 * Wrappers for p->thread_info->cpu access. No-op on UP.
2056 static inline unsigned int task_cpu(const struct task_struct *p)
2058 #ifdef CONFIG_THREAD_INFO_IN_TASK
2061 return task_thread_info(p)->cpu;
2065 static inline int task_node(const struct task_struct *p)
2067 return cpu_to_node(task_cpu(p));
2070 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2074 static inline unsigned int task_cpu(const struct task_struct *p)
2079 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2083 #endif /* CONFIG_SMP */
2086 * In order to reduce various lock holder preemption latencies provide an
2087 * interface to see if a vCPU is currently running or not.
2089 * This allows us to terminate optimistic spin loops and block, analogous to
2090 * the native optimistic spin heuristic of testing if the lock owner task is running or not.
2093 #ifndef vcpu_is_preempted
2094 # define vcpu_is_preempted(cpu) false
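/*
 * Illustrative sketch (not part of the original header): an optimistic spin
 * that stops (so the caller can block instead) once the lock owner's vCPU is
 * preempted. 'owner_cpu' and 'locked' are hypothetical.
 */
static inline bool example_optimistic_spin(int owner_cpu, atomic_t *locked)
{
	while (atomic_read(locked)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;		/* give up and block */
		cpu_relax();
	}
	return true;				/* lock became free */
}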
2097 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
2098 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
2100 #ifdef CONFIG_CGROUP_SCHED
2101 extern struct task_group root_task_group;
2102 #endif /* CONFIG_CGROUP_SCHED */
2104 extern int task_can_switch_user(struct user_struct *up,
2105 struct task_struct *tsk);
2107 #ifdef CONFIG_TASK_XACCT
2108 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2110 tsk->ioac.rchar += amt;
2113 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2115 tsk->ioac.wchar += amt;
2118 static inline void inc_syscr(struct task_struct *tsk)
2123 static inline void inc_syscw(struct task_struct *tsk)
2128 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
2132 static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
2136 static inline void inc_syscr(struct task_struct *tsk)
2140 static inline void inc_syscw(struct task_struct *tsk)
2145 #ifndef TASK_SIZE_OF
2146 #define TASK_SIZE_OF(tsk) TASK_SIZE