/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
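
/* task_asleep() approximates "has the waiter actually gone to sleep?": the
 * task must be in a normal sleep state and, under CONFIG_SMP (where
 * task_struct.on_cpu exists), must no longer be running on any CPU. It is
 * only a heuristic; see the N.B. in __intel_breadcrumbs_wakeup() below.
 */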
#ifdef CONFIG_SMP
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
#else
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
#endif
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
	struct intel_wait *wait;
	unsigned int result = 0;

	lockdep_assert_held(&b->irq_lock);

	/*
	 * N.B. Since task_asleep() and ttwu are not atomic, the
	 * waiter may actually go to sleep after the check, causing
	 * us to suppress a valid wakeup. We prefer to reduce the
	 * number of false positive missed_breadcrumb() warnings
	 * at the expense of a few false negatives, as it is easy
	 * to trigger a false positive under heavy load. Enough
	 * signal should remain from genuine missed_breadcrumb()
	 * for us to detect in CI.
	 */
	bool was_asleep = task_asleep(wait->tsk);

	result = ENGINE_WAKEUP_WAITER;
	if (wake_up_process(wait->tsk) && was_asleep)
		result |= ENGINE_WAKEUP_ASLEEP;
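
/* intel_engine_wakeup() pokes the current bottom-half waiter (if any) under
 * b->irq_lock and reports, via the ENGINE_WAKEUP_* flags, whether a waiter
 * was present and whether it appeared to be asleep when woken.
 */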
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	unsigned long flags;
	unsigned int result;

	spin_lock_irqsave(&b->irq_lock, flags);
	result = __intel_breadcrumbs_wakeup(b);
	spin_unlock_irqrestore(&b->irq_lock, flags);

	return result;
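
/* wait_timeout() picks the expiry for the next hangcheck sample: one full
 * DRM_I915_HANGCHECK_JIFFIES from now, rounded up to a whole second so that
 * timer wakeups can be coalesced.
 */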
static unsigned long wait_timeout(void)
	return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer(__func__);

		intel_engine_dump(engine, &p,
				  "%s missed breadcrumb at %pS\n",
				  engine->name, __builtin_return_address(0));
	}

	set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
	struct intel_engine_cs *engine =
		from_timer(engine, t, breadcrumbs.hangcheck);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
		b->hangcheck_interrupts = atomic_read(&engine->irq_count);
		mod_timer(&b->hangcheck, wait_timeout());
	/* We keep the hangcheck timer alive until we disarm the irq, even
	 * if there are no waiters at present.
	 *
	 * If the waiter was currently running, assume it hasn't had a chance
	 * to process the pending interrupt (e.g., low priority task on a loaded
	 * system) and wait until it sleeps before declaring a missed interrupt.
	 *
	 * If the waiter was asleep (and not even pending a wakeup), then we
	 * must have missed an interrupt as the GPU has stopped advancing
	 * but we still have a waiter. Assuming all batches complete within
	 * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
		missed_breadcrumb(engine);
		mod_timer(&b->fake_irq, jiffies + 1);
	} else {
		mod_timer(&b->hangcheck, wait_timeout());
	}
static void intel_breadcrumbs_fake_irq(struct timer_list *t)
	struct intel_engine_cs *engine = from_timer(engine, t,
						    breadcrumbs.fake_irq);
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The timer persists in case we cannot enable interrupts,
	 * or if we have previously seen seqno/interrupt incoherency
	 * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
	 * Here the worker will wake up every jiffie in order to kick the
	 * oldest waiter to do the coherent seqno check.
	 */
	spin_lock_irq(&b->irq_lock);
	if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
		__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock_irq(&b->irq_lock);

	mod_timer(&b->fake_irq, jiffies + 1);
	/* Ensure that even if the GPU hangs, we get woken up.
	 *
	 * However, note that if no one is waiting, we never notice
	 * a gpu hang. Eventually, we will have to wait for a resource
	 * held by the GPU and so trigger a hangcheck. In the most
	 * pathological case, this will be upon memory starvation! To
	 * prevent this, we also queue the hangcheck from the retire
	 * worker.
	 */
	i915_queue_hangcheck(engine->i915);
static void irq_enable(struct intel_engine_cs *engine)
	/*
	 * FIXME: Ideally we want this on the API boundary, but for the
	 * sake of testing with mock breadcrumbs (no HW so unable to
	 * enable irqs) we place it deep within the bowels, at the point
	 * of no return.
	 */
	GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
	/* Enabling the IRQ may miss the generation of the interrupt, but
	 * we still need to force the barrier before reading the seqno,
	 */
	set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_enable(engine);
	spin_unlock(&engine->i915->irq_lock);

static void irq_disable(struct intel_engine_cs *engine)
	/* Caller disables interrupts */
	spin_lock(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock(&engine->i915->irq_lock);
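
/* Called with b->irq_lock held once there is no waiter left: drop the irq
 * reference taken when the breadcrumbs were armed and mark the breadcrumbs
 * as disarmed again.
 */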
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->irq_lock);
	GEM_BUG_ON(b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);

	GEM_BUG_ON(!b->irq_enabled);
	if (!--b->irq_enabled)
		irq_disable(engine);

	b->irq_armed = false;
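
/* intel_engine_pin_breadcrumbs_irq() and its unpin counterpart below keep
 * the user interrupt enabled independently of any waiter, by holding an
 * extra reference on b->irq_enabled for as long as the pin is held.
 */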
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	if (!b->irq_enabled++)
		irq_enable(engine);
	GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
	spin_unlock_irq(&b->irq_lock);

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->irq_lock);
	GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
	if (!--b->irq_enabled)
		irq_disable(engine);
	spin_unlock_irq(&b->irq_lock);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct intel_wait *wait, *n;

		goto wakeup_signaler;

	/*
	 * We only disarm the irq when we are idle (all requests completed),
	 * so if the bottom-half remains asleep, it missed the request
	 * completion.
	 */
	if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
		missed_breadcrumb(engine);
	spin_lock_irq(&b->rb_lock);

	spin_lock(&b->irq_lock);

	__intel_engine_disarm_breadcrumbs(engine);
	spin_unlock(&b->irq_lock);

	rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
		RB_CLEAR_NODE(&wait->node);
		wake_up_process(wait->tsk);
	}
	b->waiters = RB_ROOT;

	spin_unlock_irq(&b->rb_lock);

	/*
	 * The signaling thread may be asleep holding a reference to a request
	 * that had its signaling cancelled prior to being preempted. We need
	 * to kick the signaler, just in case, to release any such reference.
	 */
	wake_up_process(b->signaler);
static bool use_fake_irq(const struct intel_breadcrumbs *b)
	const struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);

	if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
		return false;

	/* Only start with the heavy weight fake irq timer if we have not
	 * seen any interrupts since enabling it the first time. If the
	 * interrupts are still arriving, it means we made a mistake in our
	 * engine->seqno_barrier(), a timing error that should be transient
	 * and unlikely to reoccur.
	 */
	return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
static void enable_fake_irq(struct intel_breadcrumbs *b)
	/* Ensure we never sleep indefinitely */
	if (!b->irq_enabled || use_fake_irq(b))
		mod_timer(&b->fake_irq, jiffies + 1);
	else
		mod_timer(&b->hangcheck, wait_timeout());
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
	struct intel_engine_cs *engine =
		container_of(b, struct intel_engine_cs, breadcrumbs);
	struct drm_i915_private *i915 = engine->i915;

	lockdep_assert_held(&b->irq_lock);

	/* The breadcrumb irq will be disarmed on the interrupt after the
	 * waiters are signaled. This gives us a single interrupt window in
	 * which we can add a new waiter and avoid the cost of re-enabling
	 * the irq.
	 */

	if (I915_SELFTEST_ONLY(b->mock)) {
		/* For our mock objects we want to avoid interaction
		 * with the real hardware (which is not set up). So
		 * we simply pretend we have enabled the powerwell
		 * and the irq, and leave it up to the mock
		 * implementation to call intel_engine_wakeup()
		 * itself when it wants to simulate a user interrupt,
	/* Since we are waiting on a request, the GPU should be busy
	 * and should have its own rpm reference. This is tracked
	 * by i915->gt.awake, so we can forgo holding our own wakref
	 * for the interrupt as before i915->gt.awake is released (when
	 * the driver is idle) we disarm the breadcrumbs.
	 */

	/* No interrupts? Kick the waiter every jiffie! */

	if (!b->irq_enabled++ &&
	    !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
static inline struct intel_wait *to_wait(struct rb_node *node)
	return rb_entry(node, struct intel_wait, node);

static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
					      struct intel_wait *wait)
	lockdep_assert_held(&b->rb_lock);
	GEM_BUG_ON(b->irq_wait == wait);

	/* This request is completed, so remove it from the tree, mark it as
	 * complete, and *then* wake up the associated task. N.B. when the
	 * task wakes up, it will find the empty rb_node, discern that it
	 * has already been removed from the tree and skip the serialisation
	 * of the b->rb_lock and b->irq_lock. This means that the destruction
	 * of the intel_wait is not serialised with the interrupt handler
	 * by the waiter - it must instead be serialised by the caller.
	 */
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

	wake_up_process(wait->tsk); /* implicit smp_wmb() */
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
					    struct rb_node *next)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock(&b->irq_lock);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(!b->irq_wait);
	b->irq_wait = to_wait(next);
	spin_unlock(&b->irq_lock);

	/* We always wake up the next waiter that takes over as the bottom-half
	 * as we may delegate not only the irq-seqno barrier to the next waiter
	 * but also the task of waking up concurrent waiters.
	 */
	wake_up_process(to_wait(next)->tsk);
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
				    struct intel_wait *wait)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node **p, *parent, *completed;
	u32 seqno;

	/* Insert the request into the retirement ordered list
	 * of waiters by walking the rbtree. If we are the oldest
	 * seqno in the tree (the first to be retired), then
	 * set ourselves as the bottom-half.
	 *
	 * As we descend the tree, prune completed branches: since we hold the
	 * spinlock, we know that the first_waiter must be delayed and we can
	 * reduce some of the sequential wake up latency if we take action
	 * ourselves and wake up the completed tasks in parallel. Also, by
	 * removing stale elements in the tree, we may be able to reduce the
	 * ping-pong between the old bottom-half and ourselves as first-waiter.
	 */
	seqno = intel_engine_get_seqno(engine);

	/* If the request completed before we managed to grab the spinlock,
	 * return now before adding ourselves to the rbtree. We let the
	 * current bottom-half handle any pending wakeups and instead
	 * try and get out of the way quickly.
	 */
	if (i915_seqno_passed(seqno, wait->seqno)) {
		RB_CLEAR_NODE(&wait->node);
		return false;
	}
	p = &b->waiters.rb_node;

	if (wait->seqno == to_wait(parent)->seqno) {
		/* We have multiple waiters on the same seqno, select
		 * the highest priority task (that with the smallest
		 * task->prio) to serve as the bottom-half for this
		 * group.
		 */
		if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
			p = &parent->rb_right;

			p = &parent->rb_left;
	} else if (i915_seqno_passed(wait->seqno,
				     to_wait(parent)->seqno)) {
		p = &parent->rb_right;
		if (i915_seqno_passed(seqno, to_wait(parent)->seqno))

		p = &parent->rb_left;

	rb_link_node(&wait->node, parent, p);
	rb_insert_color(&wait->node, &b->waiters);
	spin_lock(&b->irq_lock);

	/* After assigning ourselves as the new bottom-half, we must
	 * perform a cursory check to prevent a missed interrupt.
	 * Either we miss the interrupt whilst programming the hardware,
	 * or if there was a previous waiter (for a later seqno) they
	 * may be woken instead of us (due to the inherent race
	 * in the unlocked read of b->irq_seqno_bh in the irq handler)
	 * and so we miss the wake up.
	 */
	armed = __intel_breadcrumbs_enable_irq(b);
	spin_unlock(&b->irq_lock);
	/* Advance the bottom-half (b->irq_wait) before we wake up
	 * the waiters who may scribble over their intel_wait
	 * just as the interrupt handler is dereferencing it via
	 * b->irq_wait.
	 */
		struct rb_node *next = rb_next(completed);
		GEM_BUG_ON(next == &wait->node);
		__intel_breadcrumbs_next(engine, next);

		struct intel_wait *crumb = to_wait(completed);
		completed = rb_prev(completed);
		__intel_breadcrumbs_finish(b, crumb);

	GEM_BUG_ON(!b->irq_wait);
	GEM_BUG_ON(!b->irq_armed);
	GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
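
/* A minimal sketch of how a waiter might drive the interface below —
 * simplified, with no timeout or signal handling; the real user is the
 * request-wait path elsewhere in the driver:
 *
 *	struct intel_wait wait = { .tsk = current, .seqno = seqno };
 *
 *	intel_engine_add_wait(engine, &wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (i915_seqno_passed(intel_engine_get_seqno(engine),
 *				      wait.seqno))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	intel_engine_remove_wait(engine, &wait);
 *
 * A true return from intel_engine_add_wait() tells the caller to recheck
 * the seqno before sleeping, either because the interrupt was only just
 * armed or because the request may already have started (see the comments
 * in __intel_breadcrumbs_enable_irq() and intel_engine_add_wait()).
 */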
bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	bool armed;

	spin_lock_irq(&b->rb_lock);
	armed = __intel_engine_add_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);

	/* Make the caller recheck if its request has already started. */
	return i915_seqno_passed(intel_engine_get_seqno(engine),
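
/* chain_wakeup() decides whether the departing bottom-half should spend its
 * own time waking the next waiter in the tree: it is only worth doing if
 * that waiter is of equal or higher priority (numerically smaller or equal
 * task->prio) than the priority passed in by the caller.
 */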
static inline bool chain_wakeup(struct rb_node *rb, int priority)
	return rb && to_wait(rb)->tsk->prio <= priority;

static inline int wakeup_priority(struct intel_breadcrumbs *b,
				  struct task_struct *tsk)
	if (tsk == b->signaler)
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
				       struct intel_wait *wait)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	lockdep_assert_held(&b->rb_lock);

	if (RB_EMPTY_NODE(&wait->node))
		goto out;

	if (b->irq_wait == wait) {
		const int priority = wakeup_priority(b, wait->tsk);
		struct rb_node *next;

		/* We are the current bottom-half. Find the next candidate,
		 * the first waiter in the queue on the remaining oldest
		 * request. As multiple seqnos may complete in the time it
		 * takes us to wake up and find the next waiter, we have to
		 * wake up that waiter for it to perform its own coherent
		 * completion check.
		 */
		next = rb_next(&wait->node);
		if (chain_wakeup(next, priority)) {
			/* If the next waiter is already complete,
			 * wake it up and continue onto the next waiter. So
			 * if we have a small herd, they will wake up in parallel
			 * rather than sequentially, which should reduce
			 * the overall latency in waking all the completed
			 * waiters.
			 *
			 * However, waking up a chain adds extra latency to
			 * the first_waiter. This is undesirable if that
			 * waiter is a high priority task.
			 */
			u32 seqno = intel_engine_get_seqno(engine);

			while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
				struct rb_node *n = rb_next(next);

				__intel_breadcrumbs_finish(b, to_wait(next));
				next = n;
				if (!chain_wakeup(next, priority))
					break;
			}
		}

		__intel_breadcrumbs_next(engine, next);

		GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);

	GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
	rb_erase(&wait->node, &b->waiters);
	RB_CLEAR_NODE(&wait->node);

out:
	GEM_BUG_ON(b->irq_wait == wait);
	GEM_BUG_ON(rb_first(&b->waiters) !=
		   (b->irq_wait ? &b->irq_wait->node : NULL));
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* Quick check to see if this waiter was already decoupled from
	 * the tree by the bottom-half to avoid contention on the spinlock
	 */
	if (RB_EMPTY_NODE(&wait->node)) {
		GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
		return;
	}

	spin_lock_irq(&b->rb_lock);
	__intel_engine_remove_wait(engine, wait);
	spin_unlock_irq(&b->rb_lock);
static bool signal_valid(const struct drm_i915_gem_request *request)
	return intel_wait_check_request(&request->signaling.wait, request);

static bool signal_complete(const struct drm_i915_gem_request *request)
	/* If another process served as the bottom-half it may have already
	 * signalled that this wait is already completed.
	 */
	if (intel_wait_complete(&request->signaling.wait))
		return signal_valid(request);

	/* Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
	if (__i915_request_irq_complete(request))
		return true;

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
	return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
static void signaler_set_rtpriority(void)
	struct sched_param param = { .sched_priority = 1 };

	sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
static int intel_breadcrumbs_signaler(void *arg)
	struct intel_engine_cs *engine = arg;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct drm_i915_gem_request *request;

	/* Install ourselves with high priority to reduce signalling latency */
	signaler_set_rtpriority();

	bool do_schedule = true;

	set_current_state(TASK_INTERRUPTIBLE);
	/* We are either woken up by the interrupt bottom-half,
	 * or by a client adding a new signaller. In both cases,
	 * the GPU seqno may have advanced beyond our oldest signal.
	 * If it has, propagate the signal, remove the waiter and
	 * check again with the next oldest signal. Otherwise we
	 * need to wait for a new interrupt from the GPU or for
	 * a new client.
	 */
	request = rcu_dereference(b->first_signal);
	if (request)
		request = i915_gem_request_get_rcu(request);

	if (signal_complete(request)) {
		local_bh_disable();
		dma_fence_signal(&request->fence);
		local_bh_enable(); /* kick start the tasklets */
		spin_lock_irq(&b->rb_lock);

		/* Wake up all other completed waiters and select the
		 * next bottom-half for the next user interrupt.
		 */
		__intel_engine_remove_wait(engine,
					   &request->signaling.wait);

		/* Find the next oldest signal. Note that as we have
		 * not been holding the lock, another client may
		 * have installed an even older signal than the one
		 * we just completed - so double check we are still
		 * the oldest before picking the next one.
		 */
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);

		spin_unlock_irq(&b->rb_lock);

		i915_gem_request_put(request);
		/* If the engine is saturated we may be continually
		 * processing completed requests. This angers the
		 * NMI watchdog if we never let anything else
		 * have access to the CPU. Let's pretend to be nice
		 * and relinquish the CPU if we burn through the
		 * entire RT timeslice!
		 */
		do_schedule = need_resched();

	if (unlikely(do_schedule)) {
		if (kthread_should_park())
			kthread_parkme();

		if (unlikely(kthread_should_stop())) {
			i915_gem_request_put(request);
			break;
		}

	i915_gem_request_put(request);

	__set_current_state(TASK_RUNNING);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
				   bool wakeup)
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	u32 seqno;

	/* Note that we may be called from an interrupt handler on another
	 * device (e.g. nouveau signaling a fence completion causing us
	 * to submit a request, and so enable signaling). As such,
	 * we need to make sure that all other users of b->rb_lock protect
	 * against interrupts, i.e. use spin_lock_irqsave.
	 */

	/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	seqno = i915_gem_request_global_seqno(request);
	if (!seqno)
		return;

	request->signaling.wait.tsk = b->signaler;
	request->signaling.wait.request = request;
	request->signaling.wait.seqno = seqno;
	i915_gem_request_get(request);

	spin_lock(&b->rb_lock);

	/* First add ourselves into the list of waiters, but register our
	 * bottom-half as the signaller thread. As per usual, only the oldest
	 * waiter (not just signaller) is tasked as the bottom-half waking
	 * up all completed waiters after the user interrupt.
	 *
	 * If we are the oldest waiter, enable the irq (after which we
	 * must double check that the seqno did not complete).
	 */
	wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);

	if (!__i915_gem_request_completed(request, seqno)) {
		struct rb_node *parent, **p;

		/* Now insert ourselves into the retirement ordered list of
		 * signals on this engine. We track the oldest seqno as that
		 * will be the first signal to complete.
		 */
		p = &b->signals.rb_node;

		if (i915_seqno_passed(seqno,
				      to_signaler(parent)->signaling.wait.seqno)) {
			p = &parent->rb_right;

			p = &parent->rb_left;
		}
		rb_link_node(&request->signaling.node, parent, p);
		rb_insert_color(&request->signaling.node, &b->signals);

		rcu_assign_pointer(b->first_signal, request);

	__intel_engine_remove_wait(engine, &request->signaling.wait);
	i915_gem_request_put(request);

	spin_unlock(&b->rb_lock);

	if (wakeup)
		wake_up_process(b->signaler);
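
/* intel_engine_cancel_signaling() undoes intel_engine_enable_signaling():
 * called under the request lock with interrupts disabled, it unlinks the
 * request from the signal tree (updating b->first_signal if necessary),
 * removes its waiter and drops the reference taken when signaling was
 * enabled.
 */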
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
	struct intel_engine_cs *engine = request->engine;
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&request->lock);
	GEM_BUG_ON(!request->signaling.wait.seqno);

	spin_lock(&b->rb_lock);

	if (!RB_EMPTY_NODE(&request->signaling.node)) {
		if (request == rcu_access_pointer(b->first_signal)) {
			struct rb_node *rb =
				rb_next(&request->signaling.node);
			rcu_assign_pointer(b->first_signal,
					   rb ? to_signaler(rb) : NULL);
		}
		rb_erase(&request->signaling.node, &b->signals);
		RB_CLEAR_NODE(&request->signaling.node);
		i915_gem_request_put(request);
	}

	__intel_engine_remove_wait(engine, &request->signaling.wait);

	spin_unlock(&b->rb_lock);

	request->signaling.wait.seqno = 0;
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct task_struct *tsk;

	spin_lock_init(&b->rb_lock);
	spin_lock_init(&b->irq_lock);

	timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
	timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

	/* Spawn a thread to provide a common bottom-half for all signals.
	 * As this is an asynchronous interface we cannot steal the current
	 * task for handling the bottom-half to the user interrupt, therefore
	 * we create a thread to do the coherent seqno dance after the
	 * interrupt and then signal the waitqueue (via the dma-buf/fence).
	 */
	tsk = kthread_run(intel_breadcrumbs_signaler, engine,
			  "i915/signal:%d", engine->id);
static void cancel_fake_irq(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	del_timer_sync(&b->hangcheck);
	del_timer_sync(&b->fake_irq);
	clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	cancel_fake_irq(engine);
	spin_lock_irq(&b->irq_lock);

	/* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
	 * GPU is active and may have already executed the MI_USER_INTERRUPT
	 * before the CPU is ready to receive. However, the engine is currently
	 * idle (we haven't started it yet), so there is no possibility for a
	 * missed interrupt as we enabled the irq and so we can clear the
	 * immediate wakeup (until a real interrupt arrives for the waiter).
	 */
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	spin_unlock_irq(&b->irq_lock);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	/* The engines should be idle and all requests accounted for! */
	WARN_ON(READ_ONCE(b->irq_wait));
	WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
	WARN_ON(rcu_access_pointer(b->first_signal));
	WARN_ON(!RB_EMPTY_ROOT(&b->signals));

	if (!IS_ERR_OR_NULL(b->signaler))
		kthread_stop(b->signaler);

	cancel_fake_irq(engine);
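
/* intel_breadcrumbs_busy() reports whether the engine still has a waiter or
 * a pending signal outstanding, kicking the bottom-half waiter and the
 * signaler thread as a side effect so that they notice any change in state.
 */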
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
	struct intel_breadcrumbs *b = &engine->breadcrumbs;

	spin_lock_irq(&b->rb_lock);

		wake_up_process(b->irq_wait->tsk);

	if (rcu_access_pointer(b->first_signal)) {
		wake_up_process(b->signaler);

	spin_unlock_irq(&b->rb_lock);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif