/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
	struct i915_global base;
	struct kmem_cache *slab_dependencies;
	struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);
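
/*
 * A scheduler node is embedded in every i915_request; these helpers convert
 * between the node and its owning request and query the request's state.
 */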
static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
	return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
	return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
	return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
	return rb_entry(rb, struct i915_priolist, node);
}
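
/*
 * Debug-only consistency check (CONFIG_DRM_I915_DEBUG_GEM): the execlists
 * queue must be sorted by descending priority, and every non-empty
 * sub-bucket of a priolist must have its bit set in p->used.
 */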
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
	struct rb_node *rb;
	long last_prio, i;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return;

	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
		   rb_first(&execlists->queue.rb_root));

	last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		const struct i915_priolist *p = to_priolist(rb);

		GEM_BUG_ON(p->priority >= last_prio);
		last_prio = p->priority;

		GEM_BUG_ON(!p->used);
		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
			if (list_empty(&p->requests[i]))
				continue;

			GEM_BUG_ON(!(p->used & BIT(i)));
		}
	}
}
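
/*
 * Find (or create) the priolist for the given effective priority and return
 * the list head of the sub-bucket selected by the low I915_PRIORITY_MASK
 * bits. Priolists are kept in an rbtree sorted by user priority; if an
 * atomic allocation fails, we fall back to a single I915_PRIORITY_NORMAL
 * bucket and disable further priority sorting (execlists->no_priolist).
 */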
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_priolist *p;
	struct rb_node **parent, *rb;
	bool first = true;
	int idx, i;

	lockdep_assert_held(&engine->timeline.lock);
	assert_priolists(execlists);

	/* buckets sorted from highest [in slot 0] to lowest priority */
	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
	prio >>= I915_USER_PRIORITY_SHIFT;
	if (unlikely(execlists->no_priolist))
		prio = I915_PRIORITY_NORMAL;

find_priolist:
	/* most positive priority is scheduled first, equal priorities fifo */
	rb = NULL;
	parent = &execlists->queue.rb_root.rb_node;
	while (*parent) {
		rb = *parent;
		p = to_priolist(rb);
		if (prio > p->priority) {
			parent = &rb->rb_left;
		} else if (prio < p->priority) {
			parent = &rb->rb_right;
			first = false;
		} else {
			goto out;
		}
	}

	if (prio == I915_PRIORITY_NORMAL) {
		p = &execlists->default_priolist;
	} else {
		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
		/* Convert an allocation failure to a priority bump */
		if (unlikely(!p)) {
			prio = I915_PRIORITY_NORMAL; /* recurses just once */

			/* To maintain ordering with all rendering, after an
			 * allocation failure we have to disable all scheduling.
			 * Requests will then be executed in fifo, and schedule
			 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
			goto find_priolist;
		}
	}

	p->priority = prio;
	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
		INIT_LIST_HEAD(&p->requests[i]);
	rb_link_node(&p->node, rb, parent);
	rb_insert_color_cached(&p->node, &execlists->queue, first);
	p->used = 0;

out:
	p->used |= BIT(idx);
	return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
	kmem_cache_free(global.slab_priorities, p);
}
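
/*
 * Scratch state carried while walking a dependency chain: caches the last
 * priolist looked up so consecutive nodes bumped to the same priority on the
 * same engine can reuse it. It is reset whenever we change engine locks.
 */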
struct sched_cache {
	struct list_head *priolist;
};
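
/*
 * Drop the timeline lock of the previously locked engine and take the lock
 * of the engine owning @node, invalidating the cached priolist on a switch.
 */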
static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
		  struct intel_engine_cs *locked,
		  struct sched_cache *cache)
{
	struct intel_engine_cs *engine = node_to_request(node)->engine;

	GEM_BUG_ON(!locked);

	if (engine != locked) {
		spin_unlock(&locked->timeline.lock);
		memset(cache, 0, sizeof(*cache));
		spin_lock(&engine->timeline.lock);
	}

	return engine;
}
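
/*
 * Does @rq belong to the context currently occupying the engine's first
 * execlists port? If so, there is no point trying to preempt ourselves.
 */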
static bool inflight(const struct i915_request *rq,
		     const struct intel_engine_cs *engine)
{
	const struct i915_request *active;

	if (!i915_request_is_active(rq))
		return false;

	active = port_request(engine->execlists.port);
	return active->hw_context == rq->hw_context;
}
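
/*
 * Apply a new priority to @node and propagate it to every node it depends
 * upon (priority inheritance), requeueing any runnable requests whose
 * priority changed and kicking the submission tasklet if the bump may
 * warrant preemption. Caller must hold schedule_lock.
 */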
static void __i915_schedule(struct i915_sched_node *node,
			    const struct i915_sched_attr *attr)
{
	struct intel_engine_cs *engine;
	struct i915_dependency *dep, *p;
	struct i915_dependency stack;
	const int prio = attr->priority;
	struct sched_cache cache;
	LIST_HEAD(dfs);

	/* Needed in order to use the temporary link inside i915_dependency */
	lockdep_assert_held(&schedule_lock);
	GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

	if (node_signaled(node))
		return;

	if (prio <= READ_ONCE(node->attr.priority))
		return;

	stack.signaler = node;
	list_add(&stack.dfs_link, &dfs);

	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_sched_node *node, prio) {
	 *	list_for_each_entry(dep, &node->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	queue_request(node);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order; the
	 * last element in the list is the request we must execute first.
	 */
	list_for_each_entry(dep, &dfs, dfs_link) {
		struct i915_sched_node *node = dep->signaler;

		/* If we are already flying, we know we have no signalers */
		if (node_started(node))
			continue;

		/*
		 * Within an engine, there can be no cycle, but we may
		 * refer to the same dependency chain multiple times
		 * (redundant dependencies are not eliminated) and across
		 * engines.
		 */
		list_for_each_entry(p, &node->signalers_list, signal_link) {
			GEM_BUG_ON(p == dep); /* no cycles! */

			if (node_signaled(p->signaler))
				continue;

			if (prio > READ_ONCE(p->signaler->attr.priority))
				list_move_tail(&p->dfs_link, &dfs);
		}
	}

	/*
	 * If we didn't need to bump any existing priorities, and we haven't
	 * yet submitted this request (i.e. there is no potential race with
	 * execlists_submit_request()), we can set our own priority and skip
	 * acquiring the engine locks.
	 */
	if (node->attr.priority == I915_PRIORITY_INVALID) {
		GEM_BUG_ON(!list_empty(&node->link));
		node->attr = *attr;

		if (stack.dfs_link.next == stack.dfs_link.prev)
			return;

		__list_del_entry(&stack.dfs_link);
	}

	memset(&cache, 0, sizeof(cache));
	engine = node_to_request(node)->engine;
	spin_lock(&engine->timeline.lock);

	/* Fifo and depth-first replacement ensure our deps execute before us */
	list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
		INIT_LIST_HEAD(&dep->dfs_link);

		node = dep->signaler;
		engine = sched_lock_engine(node, engine, &cache);
		lockdep_assert_held(&engine->timeline.lock);

		/* Recheck after acquiring the engine->timeline.lock */
		if (prio <= node->attr.priority || node_signaled(node))
			continue;

		node->attr.priority = prio;
		if (!list_empty(&node->link)) {
			if (!cache.priolist)
				cache.priolist =
					i915_sched_lookup_priolist(engine,
								   prio);
			list_move_tail(&node->link, cache.priolist);
		} else {
			/*
			 * If the request is not in the priolist queue because
			 * it is not yet runnable, then it doesn't contribute
			 * to our preemption decisions. On the other hand,
			 * if the request is on the HW, it too is not in the
			 * queue; but in that case we may still need to reorder
			 * the inflight requests.
			 */
			if (!i915_sw_fence_done(&node_to_request(node)->submit))
				continue;
		}

		if (prio <= engine->execlists.queue_priority_hint)
			continue;

		engine->execlists.queue_priority_hint = prio;

		/*
		 * If we are already the currently executing context, don't
		 * bother evaluating if we should preempt ourselves.
		 */
		if (inflight(node_to_request(node), engine))
			continue;

		/* Defer (tasklet) submission until after all of our updates. */
		tasklet_hi_schedule(&engine->execlists.tasklet);
	}

	spin_unlock(&engine->timeline.lock);
}
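
/* Apply @attr to @rq and its signalers under the global schedule_lock. */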
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
	spin_lock_irq(&schedule_lock);
	__i915_schedule(&rq->sched, attr);
	spin_unlock_irq(&schedule_lock);
}
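
/*
 * OR extra sub-level bits (below I915_USER_PRIORITY_SHIFT) into the node's
 * current priority, e.g. __NO_PREEMPTION for requests whose ordering must be
 * preserved once they have started execution.
 */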
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
	struct i915_sched_attr attr = node->attr;

	attr.priority |= bump;
	__i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
	unsigned long flags;

	GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
	if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
		return;

	spin_lock_irqsave(&schedule_lock, flags);
	__bump_priority(&rq->sched, bump);
	spin_unlock_irqrestore(&schedule_lock, flags);
}
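
/* Initialise a scheduler node to its default, unqueued state. */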
void i915_sched_node_init(struct i915_sched_node *node)
{
	INIT_LIST_HEAD(&node->signalers_list);
	INIT_LIST_HEAD(&node->waiters_list);
	INIT_LIST_HEAD(&node->link);
	node->attr.priority = I915_PRIORITY_INVALID;
	node->semaphores = 0;
	node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
	return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
	kmem_cache_free(global.slab_dependencies, dep);
}
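
/*
 * Record that @node must not execute before @signal, using the caller
 * provided @dep for the link. Returns false if @signal has already been
 * completed and no dependency was installed, so the caller may free @dep.
 */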
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
				      struct i915_sched_node *signal,
				      struct i915_dependency *dep,
				      unsigned long flags)
{
	bool ret = false;

	spin_lock_irq(&schedule_lock);

	if (!node_signaled(signal)) {
		INIT_LIST_HEAD(&dep->dfs_link);
		list_add(&dep->wait_link, &signal->waiters_list);
		list_add(&dep->signal_link, &node->signalers_list);
		dep->signaler = signal;
		dep->flags = flags;

		/* Keep track of whether anyone on this chain has a semaphore */
		if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
		    !node_started(signal))
			node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

		/*
		 * As we do not allow WAIT to preempt inflight requests,
		 * once we have executed a request, along with triggering
		 * any execution callbacks, we must preserve its ordering
		 * within the non-preemptible FIFO.
		 */
		BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
		if (flags & I915_DEPENDENCY_EXTERNAL)
			__bump_priority(signal, __NO_PREEMPTION);

		ret = true;
	}

	spin_unlock_irq(&schedule_lock);

	return ret;
}

int i915_sched_node_add_dependency(struct i915_sched_node *node,
				   struct i915_sched_node *signal)
{
	struct i915_dependency *dep;

	dep = i915_dependency_alloc();
	if (!dep)
		return -ENOMEM;

	if (!__i915_sched_node_add_dependency(node, signal, dep,
					      I915_DEPENDENCY_EXTERNAL |
					      I915_DEPENDENCY_ALLOC))
		i915_dependency_free(dep);

	return 0;
}
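
/*
 * Tear down a node's dependency tracking as it is retired: unlink from both
 * the signalers and waiters lists, freeing any dynamically allocated
 * i915_dependency entries.
 */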
void i915_sched_node_fini(struct i915_sched_node *node)
{
	struct i915_dependency *dep, *tmp;

	GEM_BUG_ON(!list_empty(&node->link));

	spin_lock_irq(&schedule_lock);

	/*
	 * Everyone we depended upon (the fences we wait to be signaled)
	 * should retire before us and remove themselves from our list.
	 * However, retirement is run independently on each timeline and
	 * so we may be called out-of-order.
	 */
	list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
		GEM_BUG_ON(!node_signaled(dep->signaler));
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->wait_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	/* Remove ourselves from everyone who depends upon us */
	list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
		GEM_BUG_ON(dep->signaler != node);
		GEM_BUG_ON(!list_empty(&dep->dfs_link));

		list_del(&dep->signal_link);
		if (dep->flags & I915_DEPENDENCY_ALLOC)
			i915_dependency_free(dep);
	}

	spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
	kmem_cache_shrink(global.slab_dependencies);
	kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
	kmem_cache_destroy(global.slab_dependencies);
	kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
	.shrink = i915_global_scheduler_shrink,
	.exit = i915_global_scheduler_exit,
} };

int __init i915_global_scheduler_init(void)
{
	global.slab_dependencies = KMEM_CACHE(i915_dependency,
					      SLAB_HWCACHE_ALIGN);
	if (!global.slab_dependencies)
		return -ENOMEM;

	global.slab_priorities = KMEM_CACHE(i915_priolist,
					    SLAB_HWCACHE_ALIGN);
	if (!global.slab_priorities)
		goto err_priorities;

	i915_global_register(&global.base);
	return 0;

err_priorities:
	kmem_cache_destroy(global.slab_dependencies);
	return -ENOMEM;
}