/*
 * Pressure stall information for CPU, memory and IO
 *
 * Copyright (c) 2018 Facebook, Inc.
 * Author: Johannes Weiner <hannes@cmpxchg.org>
 *
 * When CPU, memory and IO are contended, tasks experience delays that
 * reduce throughput and introduce latencies into the workload. Memory
 * and IO contention, in addition, can cause a full loss of forward
 * progress in which the CPU goes idle.
 *
 * This code aggregates individual task delays into resource pressure
 * metrics that indicate problems with both workload health and
 * resource utilization.
 *
 * The time in which a task can execute on a CPU is our baseline for
 * productivity. Pressure expresses the amount of time in which this
 * potential cannot be realized due to resource contention.
 *
 * This concept of productivity has two components: the workload and
 * the CPU. To measure the impact of pressure on both, we define two
 * contention states for a resource: SOME and FULL.
 *
 * In the SOME state of a given resource, one or more tasks are
 * delayed on that resource. This affects the workload's ability to
 * perform work, but the CPU may still be executing other tasks.
 *
 * In the FULL state of a given resource, all non-idle tasks are
 * delayed on that resource such that nobody is advancing and the CPU
 * goes idle. This leaves both workload and CPU unproductive.
 *
 * (Naturally, the FULL state doesn't exist for the CPU resource.)
 *
 *	SOME = nr_delayed_tasks != 0
 *	FULL = nr_delayed_tasks != 0 && nr_running_tasks == 0
 *
 * The percentage of wallclock time spent in those compound stall
 * states gives pressure numbers between 0 and 100 for each resource,
 * where the SOME percentage indicates workload slowdowns and the FULL
 * percentage indicates reduced CPU utilization:
 *
 *	%SOME = time(SOME) / period
 *	%FULL = time(FULL) / period
 *
 * The more tasks and available CPUs there are, the more work can be
 * performed concurrently. This means that the potential that can go
 * unrealized due to resource contention *also* scales with non-idle
 * tasks and CPUs.
 *
 * Consider a scenario where 257 number crunching tasks are trying to
 * run concurrently on 256 CPUs. If we simply aggregated the task
 * states, we would have to conclude a CPU SOME pressure number of
 * 100%, since *somebody* is waiting on a runqueue at all
 * times. However, that is clearly not the amount of contention the
 * workload is experiencing: only one out of 256 possible execution
 * threads will be contended at any given time, or about 0.4%.
 *
 * Conversely, consider a scenario of 4 tasks and 4 CPUs where at any
 * given time *one* of the tasks is delayed due to a lack of memory.
 * Again, looking purely at the task state would yield a memory FULL
 * pressure number of 0%, since *somebody* is always making forward
 * progress. But again this wouldn't capture the amount of execution
 * potential lost, which is 1 out of 4 CPUs, or 25%.
 *
 * To calculate wasted potential (pressure) with multiple processors,
 * we have to base our calculation on the number of non-idle tasks in
 * conjunction with the number of available CPUs, which is the number
 * of potential execution threads. SOME becomes then the proportion of
 * delayed tasks to possible threads, and FULL is the share of possible
 * threads that are unproductive due to delays:
 *
 *	threads = min(nr_nonidle_tasks, nr_cpus)
 *	   SOME = min(nr_delayed_tasks / threads, 1)
 *	   FULL = (threads - min(nr_running_tasks, threads)) / threads
 *
 * For the 257 number crunchers on 256 CPUs, this yields:
 *
 *	threads = min(257, 256)
 *	   SOME = min(1 / 256, 1)             = 0.4%
 *	   FULL = (256 - min(257, 256)) / 256 = 0%
 *
 * For the 1 out of 4 memory-delayed tasks, this yields:
 *
 *	threads = min(4, 4)
 *	   SOME = min(1 / 4, 1)               = 25%
 *	   FULL = (4 - min(3, 4)) / 4         = 25%
 *
 * [ Substitute nr_cpus with 1, and you can see that it's a natural
 *   extension of the single-CPU model. ]
 *
 * To assess the precise time spent in each such state, we would have
 * to freeze the system on task changes and start/stop the state
 * clocks accordingly. Obviously that doesn't scale in practice.
 *
 * Because the scheduler aims to distribute the compute load evenly
 * among the available CPUs, we can track task state locally to each
 * CPU and, at much lower frequency, extrapolate the global state for
 * the cumulative stall times and the running averages.
 *
 * For each runqueue, we track:
 *
 *	tSOME[cpu] = time(nr_delayed_tasks[cpu] != 0)
 *	tFULL[cpu] = time(nr_delayed_tasks[cpu] && !nr_running_tasks[cpu])
 *	tNONIDLE[cpu] = time(nr_nonidle_tasks[cpu] != 0)
 *
 * and then periodically aggregate:
 *
 *	tNONIDLE = sum(tNONIDLE[i])
 *
 *	tSOME    = sum(tSOME[i] * tNONIDLE[i]) / tNONIDLE
 *	tFULL    = sum(tFULL[i] * tNONIDLE[i]) / tNONIDLE
 *
 *	   %SOME = tSOME / period
 *	   %FULL = tFULL / period
 *
 * This gives us an approximation of pressure that is practical
 * cost-wise, yet way more sensitive and accurate than periodic
 * sampling of the aggregate task states would be.
 */
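/*
 * Worked example (illustrative; the numbers are not from the original
 * text): take two CPUs over a 2000ms sampling period. CPU0 was
 * non-idle for 1000ms, 500ms of which were spent in SOME; CPU1 was
 * non-idle for 250ms, all of it in SOME. Then:
 *
 *	tNONIDLE = 1000 + 250                  = 1250ms
 *	tSOME    = (500*1000 + 250*250) / 1250 = 450ms
 *	   %SOME = 450 / 2000                  = 22.5%
 *
 * The mostly idle CPU1 contributes proportionally less to the
 * aggregate than the busier CPU0, which is exactly the weighting by
 * non-idle time described above.
 */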
#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/psi.h>
#include "sched.h"
static int psi_bug __read_mostly;

DEFINE_STATIC_KEY_FALSE(psi_disabled);

#ifdef CONFIG_PSI_DEFAULT_DISABLED
static bool psi_enable;
#else
static bool psi_enable = true;
#endif

static int __init setup_psi(char *str)
{
	return kstrtobool(str, &psi_enable) == 0;
}
__setup("psi=", setup_psi);
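/*
 * Usage note (added for illustration): with CONFIG_PSI_DEFAULT_DISABLED
 * set, booting with "psi=1" on the kernel command line turns the
 * accounting back on; "psi=0" disables it when it defaults to on.
 */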
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ	(2*HZ+1)	/* 2 sec intervals */
#define EXP_10s		1677		/* 1/exp(2s/10s) as fixed-point */
#define EXP_60s		1981		/* 1/exp(2s/60s) */
#define EXP_300s	2034		/* 1/exp(2s/300s) */
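/*
 * Derivation sketch (added for illustration, assuming the loadavg
 * fixed-point base FIXED_1 == 2048 from <linux/sched/loadavg.h>):
 *
 *	EXP_10s  = 2048 / exp(2s/10s)  ~= 2048 * 0.8187 ~= 1677
 *	EXP_60s  = 2048 / exp(2s/60s)  ~= 2048 * 0.9672 ~= 1981
 *	EXP_300s = 2048 / exp(2s/300s) ~= 2048 * 0.9934 ~= 2034
 *
 * calc_load() then computes avg * exp + sample * (FIXED_1 - exp),
 * scaled back down by FIXED_1, i.e. an exponential moving average
 * with the above per-2s-period decay factors.
 */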
/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;

/* System-level pressure and stall tracking */
static DEFINE_PER_CPU(struct psi_group_cpu, system_group_pcpu);
static struct psi_group psi_system = {
	.pcpu = &system_group_pcpu,
};
static void psi_update_work(struct work_struct *work);

static void group_init(struct psi_group *group)
{
	int cpu;

	for_each_possible_cpu(cpu)
		seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
	group->next_update = sched_clock() + psi_period;
	INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
	mutex_init(&group->stat_lock);
}

void __init psi_init(void)
{
	if (!psi_enable) {
		static_branch_enable(&psi_disabled);
		return;
	}

	psi_period = jiffies_to_nsecs(PSI_FREQ);
	group_init(&psi_system);
}
static bool test_state(unsigned int *tasks, enum psi_states state)
{
	switch (state) {
	case PSI_IO_SOME:
		return tasks[NR_IOWAIT];
	case PSI_IO_FULL:
		return tasks[NR_IOWAIT] && !tasks[NR_RUNNING];
	case PSI_MEM_SOME:
		return tasks[NR_MEMSTALL];
	case PSI_MEM_FULL:
		return tasks[NR_MEMSTALL] && !tasks[NR_RUNNING];
	case PSI_CPU_SOME:
		return tasks[NR_RUNNING] > 1;
	case PSI_NONIDLE:
		return tasks[NR_IOWAIT] || tasks[NR_MEMSTALL] ||
			tasks[NR_RUNNING];
	default:
		return false;
	}
}
static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
{
	struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
	u64 now, state_start;
	enum psi_states s;
	unsigned int seq;
	u32 state_mask;

	/* Snapshot a coherent view of the CPU state */
	do {
		seq = read_seqcount_begin(&groupc->seq);
		now = cpu_clock(cpu);
		memcpy(times, groupc->times, sizeof(groupc->times));
		state_mask = groupc->state_mask;
		state_start = groupc->state_start;
	} while (read_seqcount_retry(&groupc->seq, seq));

	/* Calculate state time deltas against the previous snapshot */
	for (s = 0; s < NR_PSI_STATES; s++) {
		u32 delta;
		/*
		 * In addition to already concluded states, we also
		 * incorporate currently active states on the CPU,
		 * since states may last for many sampling periods.
		 *
		 * This way we keep our delta sampling buckets small
		 * (u32) and our reported pressure close to what's
		 * actually happening.
		 */
		if (state_mask & (1 << s))
			times[s] += now - state_start;

		delta = times[s] - groupc->times_prev[s];
		groupc->times_prev[s] = times[s];

		times[s] = delta;
	}
}
static void calc_avgs(unsigned long avg[3], int missed_periods,
		      u64 time, u64 period)
{
	unsigned long pct;

	/* Fill in zeroes for periods of no activity */
	if (missed_periods) {
		avg[0] = calc_load_n(avg[0], EXP_10s, 0, missed_periods);
		avg[1] = calc_load_n(avg[1], EXP_60s, 0, missed_periods);
		avg[2] = calc_load_n(avg[2], EXP_300s, 0, missed_periods);
	}

	/* Sample the most recent active period */
	pct = div_u64(time * 100, period);
	pct *= FIXED_1;
	avg[0] = calc_load(avg[0], EXP_10s, pct);
	avg[1] = calc_load(avg[1], EXP_60s, pct);
	avg[2] = calc_load(avg[2], EXP_300s, pct);
}
static bool update_stats(struct psi_group *group)
{
	u64 deltas[NR_PSI_STATES - 1] = { 0, };
	unsigned long missed_periods = 0;
	unsigned long nonidle_total = 0;
	u64 now, expires, period;
	int cpu;
	int s;

	mutex_lock(&group->stat_lock);

	/*
	 * Collect the per-cpu time buckets and average them into a
	 * single time sample that is normalized to wallclock time.
	 *
	 * For averaging, each CPU is weighted by its non-idle time in
	 * the sampling period. This eliminates artifacts from uneven
	 * loading, or even entirely idle CPUs.
	 */
	for_each_possible_cpu(cpu) {
		u32 times[NR_PSI_STATES];
		u32 nonidle;

		get_recent_times(group, cpu, times);

		nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
		nonidle_total += nonidle;

		for (s = 0; s < PSI_NONIDLE; s++)
			deltas[s] += (u64)times[s] * nonidle;
	}

	/*
	 * Integrate the sample into the running statistics that are
	 * reported to userspace: the cumulative stall times and the
	 * decaying averages.
	 *
	 * Pressure percentages are sampled at PSI_FREQ. We might be
	 * called more often when the user polls more frequently than
	 * that; we might be called less often when there is no task
	 * activity, thus no data, and clock ticks are sporadic. The
	 * below handles both.
	 */

	/* total= */
	for (s = 0; s < NR_PSI_STATES - 1; s++)
		group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));

	/* avgX= */
	now = sched_clock();
	expires = group->next_update;
	if (now < expires)
		goto out;
	if (now - expires >= psi_period)
		missed_periods = div_u64(now - expires, psi_period);

	/*
	 * The periodic clock tick can get delayed for various
	 * reasons, especially on loaded systems. To avoid clock
	 * drift, we schedule the clock in fixed psi_period intervals.
	 * But the deltas we sample out of the per-cpu buckets above
	 * are based on the actual time elapsing between clock ticks.
	 */
	group->next_update = expires + ((1 + missed_periods) * psi_period);
	period = now - (group->last_update + (missed_periods * psi_period));
	group->last_update = now;

	for (s = 0; s < NR_PSI_STATES - 1; s++) {
		u32 sample;

		sample = group->total[s] - group->total_prev[s];
		/*
		 * Due to the lockless sampling of the time buckets,
		 * recorded time deltas can slip into the next period,
		 * which under full pressure can result in samples in
		 * excess of the period length.
		 *
		 * We don't want to report non-sensical pressures in
		 * excess of 100%, nor do we want to drop such events
		 * on the floor. Instead we punt any overage into the
		 * future until pressure subsides. By doing this we
		 * don't underreport the occurring pressure curve, we
		 * just report it delayed by one period length.
		 *
		 * The error isn't cumulative. As soon as another
		 * delta slips from a period P to P+1, by definition
		 * it frees up its time T in P.
		 */
		if (sample > period)
			sample = period;
		group->total_prev[s] += sample;
		calc_avgs(group->avg[s], missed_periods, sample, period);
	}
out:
	mutex_unlock(&group->stat_lock);
	return nonidle_total;
}
static void psi_update_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct psi_group *group;
	bool nonidle;

	dwork = to_delayed_work(work);
	group = container_of(dwork, struct psi_group, clock_work);

	/*
	 * If there is task activity, periodically fold the per-cpu
	 * times and feed samples into the running averages. If things
	 * are idle and there is no data to process, stop the clock.
	 * Once restarted, we'll catch up the running averages in one
	 * go - see calc_avgs() and missed_periods.
	 */

	nonidle = update_stats(group);

	if (nonidle) {
		unsigned long delay = 0;
		u64 now;

		now = sched_clock();
		if (group->next_update > now)
			delay = nsecs_to_jiffies(group->next_update - now) + 1;
		schedule_delayed_work(dwork, delay);
	}
}
static void record_times(struct psi_group_cpu *groupc, int cpu,
			 bool memstall_tick)
{
	u32 delta;
	u64 now;

	now = cpu_clock(cpu);
	delta = now - groupc->state_start;
	groupc->state_start = now;

	if (groupc->state_mask & (1 << PSI_IO_SOME)) {
		groupc->times[PSI_IO_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_IO_FULL))
			groupc->times[PSI_IO_FULL] += delta;
	}

	if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
		groupc->times[PSI_MEM_SOME] += delta;
		if (groupc->state_mask & (1 << PSI_MEM_FULL))
			groupc->times[PSI_MEM_FULL] += delta;
		else if (memstall_tick) {
			u32 sample;
			/*
			 * Since we care about lost potential, a
			 * memstall is FULL when there are no other
			 * working tasks, but also when the CPU is
			 * actively reclaiming and nothing productive
			 * could run even if it were runnable.
			 *
			 * When the timer tick sees a reclaiming CPU,
			 * regardless of runnable tasks, sample a FULL
			 * tick (or less if it hasn't been a full tick
			 * since the last state change).
			 */
			sample = min(delta, (u32)jiffies_to_nsecs(1));
			groupc->times[PSI_MEM_FULL] += sample;
		}
	}

	if (groupc->state_mask & (1 << PSI_CPU_SOME))
		groupc->times[PSI_CPU_SOME] += delta;

	if (groupc->state_mask & (1 << PSI_NONIDLE))
		groupc->times[PSI_NONIDLE] += delta;
}
static void psi_group_change(struct psi_group *group, int cpu,
			     unsigned int clear, unsigned int set)
{
	struct psi_group_cpu *groupc;
	unsigned int t, m;
	enum psi_states s;
	u32 state_mask = 0;

	groupc = per_cpu_ptr(group->pcpu, cpu);

	/*
	 * First we assess the aggregate resource states this CPU's
	 * tasks have been in since the last change, and account any
	 * SOME and FULL time these may have resulted in.
	 *
	 * Then we update the task counts according to the state
	 * change requested through the @clear and @set bits.
	 */
	write_seqcount_begin(&groupc->seq);

	record_times(groupc, cpu, false);

	for (t = 0, m = clear; m; m &= ~(1 << t), t++) {
		if (!(m & (1 << t)))
			continue;
		if (groupc->tasks[t] == 0 && !psi_bug) {
			printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u] clear=%x set=%x\n",
					cpu, t, groupc->tasks[0],
					groupc->tasks[1], groupc->tasks[2],
					clear, set);
			psi_bug = 1;
		}
		groupc->tasks[t]--;
	}

	for (t = 0; set; set &= ~(1 << t), t++)
		if (set & (1 << t))
			groupc->tasks[t]++;

	/* Calculate state mask representing active states */
	for (s = 0; s < NR_PSI_STATES; s++) {
		if (test_state(groupc->tasks, s))
			state_mask |= (1 << s);
	}
	groupc->state_mask = state_mask;

	write_seqcount_end(&groupc->seq);
}
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
{
#ifdef CONFIG_CGROUPS
	struct cgroup *cgroup = NULL;

	if (!*iter)
		cgroup = task->cgroups->dfl_cgrp;
	else if (*iter == &psi_system)
		return NULL;
	else
		cgroup = cgroup_parent(*iter);

	if (cgroup && cgroup_parent(cgroup)) {
		*iter = cgroup;
		return cgroup_psi(cgroup);
	}
#else
	if (*iter)
		return NULL;
#endif
	*iter = &psi_system;
	return &psi_system;
}
void psi_task_change(struct task_struct *task, int clear, int set)
{
	int cpu = task_cpu(task);
	struct psi_group *group;
	bool wake_clock = true;
	void *iter = NULL;

	if (!task->pid)
		return;

	if (((task->psi_flags & set) ||
	     (task->psi_flags & clear) != clear) &&
	    !psi_bug) {
		printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
				task->pid, task->comm, cpu,
				task->psi_flags, clear, set);
		psi_bug = 1;
	}

	task->psi_flags &= ~clear;
	task->psi_flags |= set;

	/*
	 * Periodic aggregation shuts off if there is a period of no
	 * task changes, so we wake it back up if necessary. However,
	 * don't do this if the task change is the aggregation worker
	 * itself going to sleep, or we'll ping-pong forever.
	 */
	if (unlikely((clear & TSK_RUNNING) &&
		     (task->flags & PF_WQ_WORKER) &&
		     wq_worker_last_func(task) == psi_update_work))
		wake_clock = false;

	while ((group = iterate_groups(task, &iter))) {
		psi_group_change(group, cpu, clear, set);
		if (wake_clock && !delayed_work_pending(&group->clock_work))
			schedule_delayed_work(&group->clock_work, PSI_FREQ);
	}
}
void psi_memstall_tick(struct task_struct *task, int cpu)
{
	struct psi_group *group;
	void *iter = NULL;

	while ((group = iterate_groups(task, &iter))) {
		struct psi_group_cpu *groupc;

		groupc = per_cpu_ptr(group->pcpu, cpu);
		write_seqcount_begin(&groupc->seq);
		record_times(groupc, cpu, true);
		write_seqcount_end(&groupc->seq);
	}
}
/**
 * psi_memstall_enter - mark the beginning of a memory stall section
 * @flags: flags to handle nested sections
 *
 * Marks the calling task as being stalled due to a lack of memory,
 * such as waiting for a refault or performing reclaim.
 */
void psi_memstall_enter(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	*flags = current->flags & PF_MEMSTALL;
	if (*flags)
		return;
	/*
	 * PF_MEMSTALL setting & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we can
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags |= PF_MEMSTALL;
	psi_task_change(current, 0, TSK_MEMSTALL);

	rq_unlock_irq(rq, &rf);
}
/**
 * psi_memstall_leave - mark the end of a memory stall section
 * @flags: flags to handle nested memdelay sections
 *
 * Marks the calling task as no longer stalled due to lack of memory.
 */
void psi_memstall_leave(unsigned long *flags)
{
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled))
		return;

	if (*flags)
		return;
	/*
	 * PF_MEMSTALL clearing & accounting needs to be atomic wrt
	 * changes to the task's scheduling state, otherwise we could
	 * race with CPU migration.
	 */
	rq = this_rq_lock_irq(&rf);

	current->flags &= ~PF_MEMSTALL;
	psi_task_change(current, TSK_MEMSTALL, 0);

	rq_unlock_irq(rq, &rf);
}
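/*
 * Typical usage (illustrative sketch, not part of the original file):
 * a caller brackets a known memory stall, such as direct reclaim or
 * waiting on a refaulting page, with an enter/leave pair. The flags
 * word handed back by psi_memstall_enter() makes nested sections a
 * no-op for the inner pair.
 *
 *	unsigned long pflags;
 *
 *	psi_memstall_enter(&pflags);
 *	// ... stall: reclaim pages or wait for the refaulting page ...
 *	psi_memstall_leave(&pflags);
 */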
#ifdef CONFIG_CGROUPS
int psi_cgroup_alloc(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return 0;

	cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
	if (!cgroup->psi.pcpu)
		return -ENOMEM;
	group_init(&cgroup->psi);
	return 0;
}

void psi_cgroup_free(struct cgroup *cgroup)
{
	if (static_branch_likely(&psi_disabled))
		return;

	cancel_delayed_work_sync(&cgroup->psi.clock_work);
	free_percpu(cgroup->psi.pcpu);
}
/**
 * cgroup_move_task - move task to a different cgroup
 * @task: the task
 * @to: the target css_set
 *
 * Move task to a new cgroup and safely migrate its associated stall
 * state between the different groups.
 *
 * This function acquires the task's rq lock to lock out concurrent
 * changes to the task's scheduling state and - in case the task is
 * running - concurrent changes to its stall state.
 */
void cgroup_move_task(struct task_struct *task, struct css_set *to)
{
	unsigned int task_flags = 0;
	struct rq_flags rf;
	struct rq *rq;

	if (static_branch_likely(&psi_disabled)) {
		/*
		 * Lame to do this here, but the scheduler cannot be locked
		 * from the outside, so we move cgroups from inside sched/.
		 */
		rcu_assign_pointer(task->cgroups, to);
		return;
	}

	rq = task_rq_lock(task, &rf);

	if (task_on_rq_queued(task))
		task_flags = TSK_RUNNING;
	else if (task->in_iowait)
		task_flags = TSK_IOWAIT;

	if (task->flags & PF_MEMSTALL)
		task_flags |= TSK_MEMSTALL;

	if (task_flags)
		psi_task_change(task, task_flags, 0);

	/* See comment above */
	rcu_assign_pointer(task->cgroups, to);

	if (task_flags)
		psi_task_change(task, 0, task_flags);

	task_rq_unlock(rq, task, &rf);
}
#endif /* CONFIG_CGROUPS */
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
	int full;

	if (static_branch_likely(&psi_disabled))
		return -EOPNOTSUPP;

	update_stats(group);

	for (full = 0; full < 2 - (res == PSI_CPU); full++) {
		unsigned long avg[3];
		u64 total;
		int w;

		for (w = 0; w < 3; w++)
			avg[w] = group->avg[res * 2 + full][w];
		total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);

		seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
			   full ? "full" : "some",
			   LOAD_INT(avg[0]), LOAD_FRAC(avg[0]),
			   LOAD_INT(avg[1]), LOAD_FRAC(avg[1]),
			   LOAD_INT(avg[2]), LOAD_FRAC(avg[2]),
			   total);
	}

	return 0;
}
static int psi_io_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_IO);
}

static int psi_memory_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_MEM);
}

static int psi_cpu_show(struct seq_file *m, void *v)
{
	return psi_show(m, &psi_system, PSI_CPU);
}

static int psi_io_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
	return single_open(file, psi_cpu_show, NULL);
}
static const struct file_operations psi_io_fops = {
	.open		= psi_io_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_memory_fops = {
	.open		= psi_memory_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations psi_cpu_fops = {
	.open		= psi_cpu_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init psi_proc_init(void)
{
	proc_mkdir("pressure", NULL);
	proc_create("pressure/io", 0, NULL, &psi_io_fops);
	proc_create("pressure/memory", 0, NULL, &psi_memory_fops);
	proc_create("pressure/cpu", 0, NULL, &psi_cpu_fops);
	return 0;
}
module_init(psi_proc_init);
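/*
 * Example output (illustrative numbers): reading one of the files
 * created above, e.g. /proc/pressure/memory, yields one line per
 * state in the format produced by psi_show(), with total in
 * microseconds:
 *
 *	some avg10=0.32 avg60=0.18 avg300=0.02 total=190446
 *	full avg10=0.00 avg60=0.01 avg300=0.00 total=27890
 *
 * /proc/pressure/cpu reports only the "some" line, since FULL is not
 * defined for the CPU resource.
 */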