asedeno.scripts.mit.edu Git - linux.git/blobdiff - kernel/sched/stats.h
sched/smt: Make sched_smt_present track topology
[linux.git] / kernel / sched / stats.h
index 8aea199a39b4a61c6f53f4d44b876ec1798dab3e..4904c46770007f4bd0ef2c8cb6e8b567ce300864 100644 (file)
@@ -55,6 +55,92 @@ static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delt
 # define   schedstat_val_or_zero(var)  0
 #endif /* CONFIG_SCHEDSTATS */
 
+#ifdef CONFIG_PSI
+/*
+ * PSI tracks state that persists across sleeps, such as iowaits and
+ * memory stalls. As a result, it has to distinguish between sleeps,
+ * where a task's runnable state changes, and requeues, where a task
+ * and its state are being moved between CPUs and runqueues.
+ */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup)
+{
+       int clear = 0, set = TSK_RUNNING;
+
+       if (psi_disabled)
+               return;
+
+       if (!wakeup || p->sched_psi_wake_requeue) {
+               /*
+                * Requeue (or a wakeup that psi_ttwu_dequeue() flagged as a
+                * migration): re-register the sleep-persistent memstall
+                * state on the new runqueue, and consume the requeue flag.
+                */
+               if (p->flags & PF_MEMSTALL)
+                       set |= TSK_MEMSTALL;
+               if (p->sched_psi_wake_requeue)
+                       p->sched_psi_wake_requeue = 0;
+       } else {
+               /* Genuine wakeup: the task is no longer waiting on IO. */
+               if (p->in_iowait)
+                       clear |= TSK_IOWAIT;
+       }
+
+       psi_task_change(p, clear, set);
+}
+
+/*
+ * psi_dequeue - task left the runqueue
+ * @p: the task
+ * @sleep: true if the task went to sleep, false if it is being
+ *         dequeued while still runnable (e.g. for migration)
+ */
+static inline void psi_dequeue(struct task_struct *p, bool sleep)
+{
+       int clear = TSK_RUNNING, set = 0;
+
+       if (psi_disabled)
+               return;
+
+       if (!sleep) {
+               /*
+                * Still runnable: take sleep-persistent memstall state off
+                * this queue; psi_enqueue() re-registers it on the new one.
+                */
+               if (p->flags & PF_MEMSTALL)
+                       clear |= TSK_MEMSTALL;
+       } else {
+               /* Going to sleep: the task starts waiting on IO here. */
+               if (p->in_iowait)
+                       set |= TSK_IOWAIT;
+       }
+
+       psi_task_change(p, clear, set);
+}
+
+/*
+ * psi_ttwu_dequeue - a waking task may be migrating off its old CPU
+ * @p: the task being woken
+ */
+static inline void psi_ttwu_dequeue(struct task_struct *p)
+{
+       if (psi_disabled)
+               return;
+       /*
+        * Is the task being migrated during a wakeup? Make sure to
+        * deregister its sleep-persistent psi states from the old
+        * queue, and let psi_enqueue() know it has to requeue.
+        */
+       if (unlikely(p->in_iowait || (p->flags & PF_MEMSTALL))) {
+               struct rq_flags rf;
+               struct rq *rq;
+               int clear = 0;
+
+               if (p->in_iowait)
+                       clear |= TSK_IOWAIT;
+               if (p->flags & PF_MEMSTALL)
+                       clear |= TSK_MEMSTALL;
+
+               /*
+                * Take the old runqueue's lock so the state change is
+                * applied against the queue the task is leaving; the flag
+                * tells psi_enqueue() to re-register on the new queue.
+                */
+               rq = __task_rq_lock(p, &rf);
+               psi_task_change(p, clear, 0);
+               p->sched_psi_wake_requeue = 1;
+               __task_rq_unlock(rq, &rf);
+       }
+}
+
+/*
+ * psi_task_tick - per-tick PSI accounting hook
+ * @rq: the runqueue being ticked
+ *
+ * If the currently running task is inside a memory stall section
+ * (PF_MEMSTALL), advance its per-CPU memstall accounting.
+ */
+static inline void psi_task_tick(struct rq *rq)
+{
+       if (psi_disabled)
+               return;
+
+       if (unlikely(rq->curr->flags & PF_MEMSTALL))
+               psi_memstall_tick(rq->curr, cpu_of(rq));
+}
+#else /* CONFIG_PSI */
+/* No-op stubs so scheduler call sites need no #ifdefs when PSI is out. */
+static inline void psi_enqueue(struct task_struct *p, bool wakeup) {}
+static inline void psi_dequeue(struct task_struct *p, bool sleep) {}
+static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+static inline void psi_task_tick(struct rq *rq) {}
+#endif /* CONFIG_PSI */
+
 #ifdef CONFIG_SCHED_INFO
 static inline void sched_info_reset_dequeued(struct task_struct *t)
 {