/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
#ifdef CONFIG_SMP
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_cpu)
#else
#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL)
#endif
static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
{
        struct intel_wait *wait;
        unsigned int result = 0;

        lockdep_assert_held(&b->irq_lock);

        wait = b->irq_wait;
        if (wait) {
                /*
                 * N.B. Since task_asleep() and ttwu are not atomic, the
                 * waiter may actually go to sleep after the check, causing
                 * us to suppress a valid wakeup. We prefer to reduce the
                 * number of false positive missed_breadcrumb() warnings
                 * at the expense of a few false negatives, as it is easy
                 * to trigger a false positive under heavy load. Enough
                 * signal should remain from genuine missed_breadcrumb()
                 * for us to detect in CI.
                 */
                bool was_asleep = task_asleep(wait->tsk);

                result = ENGINE_WAKEUP_WAITER;
                if (wake_up_process(wait->tsk) && was_asleep)
                        result |= ENGINE_WAKEUP_ASLEEP;
        }

        return result;
}
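/* Take b->irq_lock and kick the current bottom-half waiter, if any. */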
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        unsigned long flags;
        unsigned int result;

        spin_lock_irqsave(&b->irq_lock, flags);
        result = __intel_breadcrumbs_wakeup(b);
        spin_unlock_irqrestore(&b->irq_lock, flags);

        return result;
}
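/* Next hangcheck sample point, rounded up to batch timer wakeups. */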
static unsigned long wait_timeout(void)
{
        return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
}
static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
{
        if (drm_debug & DRM_UT_DRIVER) {
                struct drm_printer p = drm_debug_printer(__func__);

                intel_engine_dump(engine, &p,
                                  "%s missed breadcrumb at %pS\n",
                                  engine->name, __builtin_return_address(0));
        }

        set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
static void intel_breadcrumbs_hangcheck(struct timer_list *t)
{
        struct intel_engine_cs *engine =
                from_timer(engine, t, breadcrumbs.hangcheck);
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        if (!b->irq_armed)
                return;

        if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
                b->hangcheck_interrupts = atomic_read(&engine->irq_count);
                mod_timer(&b->hangcheck, wait_timeout());
                return;
        }

        /* We keep the hangcheck timer alive until we disarm the irq, even
         * if there are no waiters at present.
         *
         * If the waiter was currently running, assume it hasn't had a chance
         * to process the pending interrupt (e.g., a low priority task on a
         * loaded system) and wait until it sleeps before declaring a missed
         * interrupt.
         *
         * If the waiter was asleep (and not even pending a wakeup), then we
         * must have missed an interrupt as the GPU has stopped advancing
         * but we still have a waiter. Assuming all batches complete within
         * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
         */
        if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
                missed_breadcrumb(engine);
                mod_timer(&b->fake_irq, jiffies + 1);
        } else {
                mod_timer(&b->hangcheck, wait_timeout());
        }
}
static void intel_breadcrumbs_fake_irq(struct timer_list *t)
{
        struct intel_engine_cs *engine = from_timer(engine, t,
                                                    breadcrumbs.fake_irq);
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* The timer persists in case we cannot enable interrupts,
         * or if we have previously seen seqno/interrupt incoherency
         * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
         * Here the worker will wake up every jiffie in order to kick the
         * oldest waiter to do the coherent seqno check.
         */

        spin_lock_irq(&b->irq_lock);
        if (b->irq_armed && !__intel_breadcrumbs_wakeup(b))
                __intel_engine_disarm_breadcrumbs(engine);
        spin_unlock_irq(&b->irq_lock);
        if (!b->irq_armed)
                return;

        mod_timer(&b->fake_irq, jiffies + 1);
}
static void irq_enable(struct intel_engine_cs *engine)
{
        /*
         * FIXME: Ideally we want this on the API boundary, but for the
         * sake of testing with mock breadcrumbs (no HW so unable to
         * enable irqs) we place it deep within the bowels, at the point
         * of no return.
         */
        GEM_BUG_ON(!intel_irqs_enabled(engine->i915));

        /* Enabling the IRQ may miss the generation of the interrupt, but
         * we still need to force the barrier before reading the seqno,
         * just in case.
         */
        set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
        /* Caller disables interrupts */
        spin_lock(&engine->i915->irq_lock);
        engine->irq_disable(engine);
        spin_unlock(&engine->i915->irq_lock);
}
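/* Drop the interrupt reference taken when arming; caller holds b->irq_lock. */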
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        lockdep_assert_held(&b->irq_lock);
        GEM_BUG_ON(b->irq_wait);
        GEM_BUG_ON(!b->irq_armed);

        GEM_BUG_ON(!b->irq_enabled);
        if (!--b->irq_enabled)
                irq_disable(engine);

        b->irq_armed = false;
}
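/*
 * Pin/unpin keep the user interrupt enabled via the irq_enabled refcount,
 * irrespective of whether any waiters are present.
 */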
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        spin_lock_irq(&b->irq_lock);
        if (!b->irq_enabled++)
                irq_enable(engine);
        GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
        spin_unlock_irq(&b->irq_lock);
}
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        spin_lock_irq(&b->irq_lock);
        GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
        if (!--b->irq_enabled)
                irq_disable(engine);
        spin_unlock_irq(&b->irq_lock);
}
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct intel_wait *wait, *n;

        if (!b->irq_armed)
                goto wakeup_signaler;

        /*
         * We only disarm the irq when we are idle (all requests completed),
         * so if the bottom-half remains asleep, it missed the request
         * completion.
         */
        if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP)
                missed_breadcrumb(engine);

        spin_lock_irq(&b->rb_lock);

        spin_lock(&b->irq_lock);
        b->irq_wait = NULL;
        if (b->irq_armed)
                __intel_engine_disarm_breadcrumbs(engine);
        spin_unlock(&b->irq_lock);

        rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
                RB_CLEAR_NODE(&wait->node);
                wake_up_process(wait->tsk);
        }
        b->waiters = RB_ROOT;

        spin_unlock_irq(&b->rb_lock);

        /*
         * The signaling thread may be asleep holding a reference to a request
         * that had its signaling cancelled prior to being preempted. We need
         * to kick the signaler, just in case, to release any such reference.
         */
wakeup_signaler:
        wake_up_process(b->signaler);
}
static bool use_fake_irq(const struct intel_breadcrumbs *b)
{
        const struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);

        if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
                return false;

        /* Only start with the heavy weight fake irq timer if we have not
         * seen any interrupts since enabling it the first time. If the
         * interrupts are still arriving, it means we made a mistake in our
         * engine->seqno_barrier(), a timing error that should be transient
         * and unlikely to reoccur.
         */
        return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
}
static void enable_fake_irq(struct intel_breadcrumbs *b)
{
        /* Ensure we never sleep indefinitely */
        if (!b->irq_enabled || use_fake_irq(b))
                mod_timer(&b->fake_irq, jiffies + 1);
        else
                mod_timer(&b->hangcheck, wait_timeout());
}
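/*
 * Arm the breadcrumbs (and, if needed, the hardware interrupt) on behalf of
 * the first waiter; called with b->irq_lock held.
 */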
static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
        struct intel_engine_cs *engine =
                container_of(b, struct intel_engine_cs, breadcrumbs);
        struct drm_i915_private *i915 = engine->i915;
        bool enabled;

        lockdep_assert_held(&b->irq_lock);
        if (b->irq_armed)
                return false;

        /* The breadcrumb irq will be disarmed on the interrupt after the
         * waiters are signaled. This gives us a single interrupt window in
         * which we can add a new waiter and avoid the cost of re-enabling
         * the irq.
         */
        b->irq_armed = true;

        if (I915_SELFTEST_ONLY(b->mock)) {
                /* For our mock objects we want to avoid interaction
                 * with the real hardware (which is not set up). So
                 * we simply pretend we have enabled the powerwell
                 * and the irq, and leave it up to the mock
                 * implementation to call intel_engine_wakeup()
                 * itself when it wants to simulate a user interrupt.
                 */
                return true;
        }

        /* Since we are waiting on a request, the GPU should be busy
         * and should have its own rpm reference. This is tracked
         * by i915->gt.awake, so we can forgo holding our own wakeref
         * for the interrupt as before i915->gt.awake is released (when
         * the driver is idle) we disarm the breadcrumbs.
         */

        /* No interrupts? Kick the waiter every jiffie! */
        enabled = false;
        if (!b->irq_enabled++ &&
            !test_bit(engine->id, &i915->gpu_error.test_irq_rings)) {
                irq_enable(engine);
                enabled = true;
        }

        enable_fake_irq(b);
        return enabled;
}
static inline struct intel_wait *to_wait(struct rb_node *node)
{
        return rb_entry(node, struct intel_wait, node);
}
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
                                              struct intel_wait *wait)
{
        lockdep_assert_held(&b->rb_lock);
        GEM_BUG_ON(b->irq_wait == wait);

        /* This request is completed, so remove it from the tree, mark it as
         * complete, and *then* wake up the associated task. N.B. when the
         * task wakes up, it will find the empty rb_node, discern that it
         * has already been removed from the tree and skip the serialisation
         * of the b->rb_lock and b->irq_lock. This means that the destruction
         * of the intel_wait is not serialised with the interrupt handler
         * by the waiter - it must instead be serialised by the caller.
         */
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

        wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
                                            struct rb_node *next)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        spin_lock(&b->irq_lock);
        GEM_BUG_ON(!b->irq_armed);
        GEM_BUG_ON(!b->irq_wait);
        b->irq_wait = to_wait(next);
        spin_unlock(&b->irq_lock);

        /* We always wake up the next waiter that takes over as the bottom-half
         * as we may delegate not only the irq-seqno barrier to the next waiter
         * but also the task of waking up concurrent waiters.
         */
        wake_up_process(to_wait(next)->tsk);
}
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
                                    struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct rb_node **p, *parent, *completed;
        bool first, armed;
        u32 seqno;

        /* Insert the request into the retirement ordered list
         * of waiters by walking the rbtree. If we are the oldest
         * seqno in the tree (the first to be retired), then
         * set ourselves as the bottom-half.
         *
         * As we descend the tree, prune completed branches: since we hold
         * the spinlock, we know that the first_waiter must be delayed and
         * we can reduce some of the sequential wake up latency if we take
         * action ourselves and wake up the completed tasks in parallel.
         * Also, by removing stale elements in the tree, we may be able to
         * reduce the ping-pong between the old bottom-half and ourselves
         * as first-waiter.
         */
        armed = false;
        first = true;
        parent = NULL;
        completed = NULL;
        seqno = intel_engine_get_seqno(engine);

        /* If the request completed before we managed to grab the spinlock,
         * return now before adding ourselves to the rbtree. We let the
         * current bottom-half handle any pending wakeups and instead
         * try and get out of the way quickly.
         */
        if (i915_seqno_passed(seqno, wait->seqno)) {
                RB_CLEAR_NODE(&wait->node);
                return first;
        }

        p = &b->waiters.rb_node;
        while (*p) {
                parent = *p;
                if (wait->seqno == to_wait(parent)->seqno) {
                        /* We have multiple waiters on the same seqno, select
                         * the highest priority task (that with the smallest
                         * task->prio) to serve as the bottom-half for this
                         * group.
                         */
                        if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                } else if (i915_seqno_passed(wait->seqno,
                                             to_wait(parent)->seqno)) {
                        p = &parent->rb_right;
                        if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
                                completed = parent;
                        else
                                first = false;
                } else {
                        p = &parent->rb_left;
                }
        }
        rb_link_node(&wait->node, parent, p);
        rb_insert_color(&wait->node, &b->waiters);

        if (first) {
                spin_lock(&b->irq_lock);
                b->irq_wait = wait;
                /* After assigning ourselves as the new bottom-half, we must
                 * perform a cursory check to prevent a missed interrupt.
                 * Either we miss the interrupt whilst programming the hardware,
                 * or if there was a previous waiter (for a later seqno) they
                 * may be woken instead of us (due to the inherent race
                 * in the unlocked read of b->irq_seqno_bh in the irq handler)
                 * and so we miss the wake up.
                 */
                armed = __intel_breadcrumbs_enable_irq(b);
                spin_unlock(&b->irq_lock);
        }

        if (completed) {
                /* Advance the bottom-half (b->irq_wait) before we wake up
                 * the waiters who may scribble over their intel_wait
                 * just as the interrupt handler is dereferencing it via
                 * b->irq_wait.
                 */
                if (!first) {
                        struct rb_node *next = rb_next(completed);
                        GEM_BUG_ON(next == &wait->node);
                        __intel_breadcrumbs_next(engine, next);
                }

                do {
                        struct intel_wait *crumb = to_wait(completed);
                        completed = rb_prev(completed);
                        __intel_breadcrumbs_finish(b, crumb);
                } while (completed);
        }

        GEM_BUG_ON(!b->irq_wait);
        GEM_BUG_ON(!b->irq_armed);
        GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);

        return armed;
}
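/*
 * Add a waiter for the given seqno to the per-engine rbtree. Returns true
 * if the caller should recheck the seqno before sleeping (either we armed
 * the interrupt, or the request may already have started executing).
 */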
bool intel_engine_add_wait(struct intel_engine_cs *engine,
                           struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool armed;

        spin_lock_irq(&b->rb_lock);
        armed = __intel_engine_add_wait(engine, wait);
        spin_unlock_irq(&b->rb_lock);
        if (armed)
                return armed;

        /* Make the caller recheck if its request has already started. */
        return i915_seqno_passed(intel_engine_get_seqno(engine),
                                 wait->seqno - 1);
}
static inline bool chain_wakeup(struct rb_node *rb, int priority)
{
        return rb && to_wait(rb)->tsk->prio <= priority;
}
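/*
 * Priority used when deciding whether to chain-wake completed waiters;
 * the signaler thread is treated as the highest possible priority.
 */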
static inline int wakeup_priority(struct intel_breadcrumbs *b,
                                  struct task_struct *tsk)
{
        if (tsk == b->signaler)
                return INT_MIN;
        else
                return tsk->prio;
}
static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
                                       struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        lockdep_assert_held(&b->rb_lock);

        if (RB_EMPTY_NODE(&wait->node))
                goto out;

        if (b->irq_wait == wait) {
                const int priority = wakeup_priority(b, wait->tsk);
                struct rb_node *next;

                /* We are the current bottom-half. Find the next candidate,
                 * the first waiter in the queue on the remaining oldest
                 * request. As multiple seqnos may complete in the time it
                 * takes us to wake up and find the next waiter, we have to
                 * wake up that waiter for it to perform its own coherent
                 * completion check.
                 */
                next = rb_next(&wait->node);
                if (chain_wakeup(next, priority)) {
                        /* If the next waiter is already complete,
                         * wake it up and continue onto the next waiter. So
                         * if we have a small herd, they will wake up in parallel
                         * rather than sequentially, which should reduce
                         * the overall latency in waking all the completed
                         * clients.
                         *
                         * However, waking up a chain adds extra latency to
                         * the first_waiter. This is undesirable if that
                         * waiter is a high priority task.
                         */
                        u32 seqno = intel_engine_get_seqno(engine);

                        while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
                                struct rb_node *n = rb_next(next);

                                __intel_breadcrumbs_finish(b, to_wait(next));
                                next = n;
                                if (!chain_wakeup(next, priority))
                                        break;
                        }
                }

                __intel_breadcrumbs_next(engine, next);
        } else {
                GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
        }

        GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
        rb_erase(&wait->node, &b->waiters);
        RB_CLEAR_NODE(&wait->node);

out:
        GEM_BUG_ON(b->irq_wait == wait);
        GEM_BUG_ON(rb_first(&b->waiters) !=
                   (b->irq_wait ? &b->irq_wait->node : NULL));
}
void intel_engine_remove_wait(struct intel_engine_cs *engine,
                              struct intel_wait *wait)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* Quick check to see if this waiter was already decoupled from
         * the tree by the bottom-half to avoid contention on the spinlock
         * by the herd.
         */
        if (RB_EMPTY_NODE(&wait->node)) {
                GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
                return;
        }

        spin_lock_irq(&b->rb_lock);
        __intel_engine_remove_wait(engine, wait);
        spin_unlock_irq(&b->rb_lock);
}
static bool signal_valid(const struct drm_i915_gem_request *request)
{
        return intel_wait_check_request(&request->signaling.wait, request);
}
static bool signal_complete(const struct drm_i915_gem_request *request)
{
        if (!request)
                return false;

        /* If another process served as the bottom-half it may have already
         * signalled that this wait is completed.
         */
        if (intel_wait_complete(&request->signaling.wait))
                return signal_valid(request);

        /* Carefully check if the request is complete, giving time for the
         * seqno to be visible or if the GPU hung.
         */
        if (__i915_request_irq_complete(request))
                return true;

        return false;
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
{
        return rb_entry(rb, struct drm_i915_gem_request, signaling.node);
}
static void signaler_set_rtpriority(void)
{
        struct sched_param param = { .sched_priority = 1 };

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
}
static int intel_breadcrumbs_signaler(void *arg)
{
        struct intel_engine_cs *engine = arg;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct drm_i915_gem_request *request;

        /* Install ourselves with high priority to reduce signalling latency */
        signaler_set_rtpriority();

        do {
                bool do_schedule = true;

                set_current_state(TASK_INTERRUPTIBLE);

                /* We are either woken up by the interrupt bottom-half,
                 * or by a client adding a new signaller. In both cases,
                 * the GPU seqno may have advanced beyond our oldest signal.
                 * If it has, propagate the signal, remove the waiter and
                 * check again with the next oldest signal. Otherwise we
                 * need to wait for a new interrupt from the GPU or for
                 * a new client.
                 */
                rcu_read_lock();
                request = rcu_dereference(b->first_signal);
                if (request)
                        request = i915_gem_request_get_rcu(request);
                rcu_read_unlock();
                if (signal_complete(request)) {
                        local_bh_disable();
                        dma_fence_signal(&request->fence);
                        local_bh_enable(); /* kick start the tasklets */

                        spin_lock_irq(&b->rb_lock);

                        /* Wake up all other completed waiters and select the
                         * next bottom-half for the next user interrupt.
                         */
                        __intel_engine_remove_wait(engine,
                                                   &request->signaling.wait);

                        /* Find the next oldest signal. Note that as we have
                         * not been holding the lock, another client may
                         * have installed an even older signal than the one
                         * we just completed - so double check we are still
                         * the oldest before picking the next one.
                         */
                        if (request == rcu_access_pointer(b->first_signal)) {
                                struct rb_node *rb =
                                        rb_next(&request->signaling.node);
                                rcu_assign_pointer(b->first_signal,
                                                   rb ? to_signaler(rb) : NULL);
                        }
                        rb_erase(&request->signaling.node, &b->signals);
                        RB_CLEAR_NODE(&request->signaling.node);

                        spin_unlock_irq(&b->rb_lock);

                        i915_gem_request_put(request);

                        /* If the engine is saturated we may be continually
                         * processing completed requests. This angers the
                         * NMI watchdog if we never let anything else
                         * have access to the CPU. Let's pretend to be nice
                         * and relinquish the CPU if we burn through the
                         * entire RT timeslice!
                         */
                        do_schedule = need_resched();
                }

                if (unlikely(do_schedule)) {
                        if (kthread_should_park())
                                kthread_parkme();

                        if (unlikely(kthread_should_stop())) {
                                i915_gem_request_put(request);
                                break;
                        }

                        schedule();
                }
                i915_gem_request_put(request);
        } while (1);
        __set_current_state(TASK_RUNNING);

        return 0;
}
void intel_engine_enable_signaling(struct drm_i915_gem_request *request,
                                   bool wakeup)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        u32 seqno;

        /* Note that we may be called from an interrupt handler on another
         * device (e.g. nouveau signaling a fence completion causing us
         * to submit a request, and so enable signaling). As such,
         * we need to make sure that all other users of b->rb_lock protect
         * against interrupts, i.e. use spin_lock_irqsave.
         */

        /* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&request->lock);

        seqno = i915_gem_request_global_seqno(request);
        if (!seqno)
                return;

        request->signaling.wait.tsk = b->signaler;
        request->signaling.wait.request = request;
        request->signaling.wait.seqno = seqno;
        i915_gem_request_get(request);

        spin_lock(&b->rb_lock);

        /* First add ourselves into the list of waiters, but register our
         * bottom-half as the signaller thread. As per usual, only the oldest
         * waiter (not just signaller) is tasked as the bottom-half waking
         * up all completed waiters after the user interrupt.
         *
         * If we are the oldest waiter, enable the irq (after which we
         * must double check that the seqno did not complete).
         */
        wakeup &= __intel_engine_add_wait(engine, &request->signaling.wait);

        if (!__i915_gem_request_completed(request, seqno)) {
                struct rb_node *parent, **p;
                bool first;

                /* Now insert ourselves into the retirement ordered list of
                 * signals on this engine. We track the oldest seqno as that
                 * will be the first signal to complete.
                 */
                parent = NULL;
                first = true;
                p = &b->signals.rb_node;
                while (*p) {
                        parent = *p;
                        if (i915_seqno_passed(seqno,
                                              to_signaler(parent)->signaling.wait.seqno)) {
                                p = &parent->rb_right;
                                first = false;
                        } else {
                                p = &parent->rb_left;
                        }
                }
                rb_link_node(&request->signaling.node, parent, p);
                rb_insert_color(&request->signaling.node, &b->signals);
                if (first)
                        rcu_assign_pointer(b->first_signal, request);
        } else {
                __intel_engine_remove_wait(engine, &request->signaling.wait);
                i915_gem_request_put(request);
                wakeup = false;
        }

        spin_unlock(&b->rb_lock);

        if (wakeup)
                wake_up_process(b->signaler);
}
void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
{
        struct intel_engine_cs *engine = request->engine;
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&request->lock);
        GEM_BUG_ON(!request->signaling.wait.seqno);

        spin_lock(&b->rb_lock);

        if (!RB_EMPTY_NODE(&request->signaling.node)) {
                if (request == rcu_access_pointer(b->first_signal)) {
                        struct rb_node *rb =
                                rb_next(&request->signaling.node);
                        rcu_assign_pointer(b->first_signal,
                                           rb ? to_signaler(rb) : NULL);
                }
                rb_erase(&request->signaling.node, &b->signals);
                RB_CLEAR_NODE(&request->signaling.node);
                i915_gem_request_put(request);
        }

        __intel_engine_remove_wait(engine, &request->signaling.wait);

        spin_unlock(&b->rb_lock);

        request->signaling.wait.seqno = 0;
}
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        struct task_struct *tsk;

        spin_lock_init(&b->rb_lock);
        spin_lock_init(&b->irq_lock);

        timer_setup(&b->fake_irq, intel_breadcrumbs_fake_irq, 0);
        timer_setup(&b->hangcheck, intel_breadcrumbs_hangcheck, 0);

        /* Spawn a thread to provide a common bottom-half for all signals.
         * As this is an asynchronous interface we cannot steal the current
         * task for handling the bottom-half to the user interrupt, therefore
         * we create a thread to do the coherent seqno dance after the
         * interrupt and then signal the waitqueue (via the dma-buf/fence).
         */
        tsk = kthread_run(intel_breadcrumbs_signaler, engine,
                          "i915/signal:%d", engine->id);
        if (IS_ERR(tsk))
                return PTR_ERR(tsk);

        b->signaler = tsk;

        return 0;
}
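/* Stop the auxiliary timers and forget any missed-interrupt state. */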
static void cancel_fake_irq(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        del_timer_sync(&b->hangcheck);
        del_timer_sync(&b->fake_irq);
        clear_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
}
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        cancel_fake_irq(engine);
        spin_lock_irq(&b->irq_lock);

        if (b->irq_enabled)
                irq_enable(engine);
        else
                irq_disable(engine);

        /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
         * GPU is active and may have already executed the MI_USER_INTERRUPT
         * before the CPU is ready to receive. However, the engine is currently
         * idle (we haven't started it yet), so there is no possibility for a
         * missed interrupt as we enabled the irq and so we can clear the
         * immediate wakeup (until a real interrupt arrives for the waiter).
         */
        clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

        if (b->irq_armed)
                enable_fake_irq(b);

        spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;

        /* The engines should be idle and all requests accounted for! */
        WARN_ON(READ_ONCE(b->irq_wait));
        WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
        WARN_ON(rcu_access_pointer(b->first_signal));
        WARN_ON(!RB_EMPTY_ROOT(&b->signals));

        if (!IS_ERR_OR_NULL(b->signaler))
                kthread_stop(b->signaler);

        cancel_fake_irq(engine);
}
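/*
 * Report whether either bottom-half (the waiter or the signaler) is still
 * active, kicking them so that they notice any state change.
 */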
bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
        struct intel_breadcrumbs *b = &engine->breadcrumbs;
        bool busy = false;

        spin_lock_irq(&b->rb_lock);
        if (b->irq_wait) {
                wake_up_process(b->irq_wait->tsk);
                busy = true;
        }
        if (rcu_access_pointer(b->first_signal)) {
                wake_up_process(b->signaler);
                busy = true;
        }
        spin_unlock_irq(&b->rb_lock);

        return busy;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_breadcrumbs.c"
#endif