/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"

/*
 * Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

#define MAX_MMIO_BASES 3

struct engine_info {
	unsigned int hw_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 base : 24;
		u32 gen : 8;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.hw_id = RCS0_HW,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.hw_id = BCS0_HW,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS0] = {
		.hw_id = VCS0_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.hw_id = VCS1_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS0] = {
		.hw_id = VECS0_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.hw_id = VECS1_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @gt: the gt
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(gt->i915)) {
		default:
			MISSING_CASE(INTEL_GEN(gt->i915));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(gt->i915))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = intel_uncore_read(uncore, GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = intel_uncore_read(uncore, CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
					 INTEL_GEN(gt->i915),
					 cxt_size * 64,
					 cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(gt->i915) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
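
/*
 * The mmio_bases[] table for an engine lists base offsets newest gen first,
 * so the first entry whose .gen we meet (or exceed) is the one to use.
 */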
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}
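
/*
 * HWSTAM gates which interrupt events the hardware may report via the
 * status page; writing an all-ones mask prevents any stray HWSP writes.
 */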
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (INTEL_GEN(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	engine->i915 = gt->i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);

	engine->class = info->class;
	engine->instance = info->instance;
	__sprint_engine_name(engine);

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	engine->context_size = intel_engine_context_size(gt, engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(gt->i915)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	ewma__engine_latency_init(&engine->latency);
	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	gt->i915->engine[id] = engine;

	return 0;
}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (INTEL_GEN(i915) >= 11 ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((INTEL_GEN(i915) >= 11 &&
		     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (INTEL_GEN(i915) >= 9)
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_release() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_release(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Decouple the backend; but keep the layout for late GPU resets */
	for_each_engine(engine, gt, id) {
		if (!engine->release)
			continue;

		engine->release(engine);
		engine->release = NULL;

		memset(&engine->reset, 0, sizeof(engine->reset));

		gt->i915->engine[id] = NULL;
	}
}

void intel_engines_free(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		kfree(engine);
		gt->engine[id] = NULL;
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_device_info *device_info = mkwrite_device_info(i915);
	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(engine_mask == 0);
	WARN_ON(engine_mask &
		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(i915, i))
			continue;

		err = intel_engine_setup(gt, i);
		if (err)
			goto cleanup;

		mask |= BIT(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != engine_mask))
		device_info->engine_mask = mask;

	RUNTIME_INFO(i915)->num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	return 0;

cleanup:
	intel_engines_free(gt);
	return err;
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	return i915_vma_pin(vma, 0, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}

	return 0;

err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	intel_engine_init_active(engine, ENGINE_PHYSICAL);
	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlists(engine);
	intel_engine_init_cmd_parser(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	intel_engine_pool_init(&engine->pool);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	return 0;
}
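
/*
 * Measure how many dwords the fini breadcrumb occupies by emitting one into
 * a throwaway frame (dummy request + ring) and counting what was written.
 */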
struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_timeline timeline;
	struct intel_ring ring;
	u32 cs[1024];
};

static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
	struct measure_breadcrumb *frame;
	int dw = -ENOMEM;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	if (intel_timeline_init(&frame->timeline,
				engine->gt,
				engine->status_page.vma))
		goto out_frame;

	mutex_lock(&frame->timeline.mutex);

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	rcu_assign_pointer(frame->rq.timeline, &frame->timeline);

	dw = intel_timeline_pin(&frame->timeline);
	if (dw < 0)
		goto out_timeline;

	spin_lock_irq(&engine->active.lock);
	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
	spin_unlock_irq(&engine->active.lock);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	intel_timeline_unpin(&frame->timeline);

out_timeline:
	mutex_unlock(&frame->timeline.mutex);
	intel_timeline_fini(&frame->timeline);
out_frame:
	kfree(frame);
	return dw;
}

void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
	INIT_LIST_HEAD(&engine->active.requests);
	INIT_LIST_HEAD(&engine->active.hold);

	spin_lock_init(&engine->active.lock);
	lockdep_set_subclass(&engine->active.lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&engine->active.lock.dep_map);
	lock_map_release(&engine->active.lock.dep_map);
	local_irq_enable();
#endif
}

static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return ce;

	__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);

	err = intel_context_pin(ce); /* perma-pin so it is always available */
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class(&ce->timeline->mutex, &kernel);

	return ce;
}

/**
 * engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
static int engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
		return ret;

	engine->emit_fini_breadcrumb_dw = ret;

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	engine->kernel_context = ce;

	return 0;
}

int intel_engines_init(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(gt->i915))
		setup = intel_execlists_submission_setup;
	else
		setup = intel_ring_submission_setup;

	for_each_engine(engine, gt, id) {
		err = engine_setup_common(engine);
		if (err)
			return err;

		err = setup(engine);
		if (err)
			return err;

		err = engine_init_common(engine);
		if (err)
			return err;

		intel_engine_add_user(engine);
	}

	return 0;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->active.requests));
	tasklet_kill(&engine->execlists.tasklet); /* flush the callback */

	cleanup_status_page(engine);

	intel_engine_fini_retire(engine);
	intel_engine_pool_fini(&engine->pool);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->kernel_context) {
		intel_context_unpin(engine->kernel_context);
		intel_context_put(engine->kernel_context);
	}
	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	u64 acthd;

	if (INTEL_GEN(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (INTEL_GEN(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (INTEL_GEN(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}
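
/*
 * Ask the command streamer to stop processing (via the masked STOP_RING bit
 * in RING_MI_MODE) and wait for the engine to report MODE_IDLE.
 */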
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(engine->i915) < 3)
		return -ENODEV;

	ENGINE_TRACE(engine, "\n");

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(uncore,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, stop_timeout(engine),
					 NULL)) {
		ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n");
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	ENGINE_TRACE(engine, "\n");

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
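
/*
 * Read a per-slice/subslice register by steering the MCR selector at the
 * requested instance, with forcewake held; the selector is restored after.
 */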
static u32
read_subslice_reg(const struct intel_engine_cs *engine,
		  int slice, int subslice, i915_reg_t reg)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(const struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(i915)) {
	default:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
		break;
	}
}
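
/*
 * With a wakeref held, the ring is considered idle when RING_HEAD has caught
 * up with RING_TAIL and (gen3+) the CS parser reports MODE_IDLE.
 */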
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}
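
/*
 * If the submission tasklet is pending, run it directly here (unless it has
 * been disabled by a reset in progress), otherwise wait for it to finish on
 * whichever cpu it is currently running.
 */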
void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
	struct tasklet_struct *t = &engine->execlists.tasklet;

	if (__tasklet_is_scheduled(t)) {
		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();
	}

	/* Otherwise flush the tasklet if it was running on another cpu */
	tasklet_unlock_wait(t);
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	if (execlists_active(&engine->execlists)) {
		synchronize_hardirq(engine->i915->drm.pdev->irq);

		intel_engine_flush_submission(engine);

		if (execlists_active(&engine->execlists))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		engine->set_default_submission(engine);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
		   prefix,
		   rq->fence.context, rq->fence.seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &rq->fence.flags) ? "+" :
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &rq->fence.flags) ? "-" :
		   "",
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}
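
/* Dump a buffer as hex words, collapsing repeated rows into a single '*'. */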
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->active.lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "no timer";

	if (timer_pending(t))
		return "active";

	return "expired";
}

static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n",
			   ENGINE_READ(engine, RING_IMR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
			   repr_timer(&engine->execlists.preempt),
			   repr_timer(&engine->execlists.timer));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   read, write, num_entries);
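
		/*
		 * The CSB is a circular buffer of num_entries entries;
		 * normalise the read/write pointers and unwrap them so the
		 * loop below visits every entry written since the last read.
		 */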
		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
		while (read < write) {
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		execlists_active_lock_bh(execlists);

		for (port = execlists->active; (rq = *port); port++) {
			char hdr[80];
			int len;

			len = snprintf(hdr, sizeof(hdr),
				       "\t\tActive[%d]: ",
				       (int)(port - execlists->active));
			if (!i915_request_signaled(rq)) {
				struct intel_timeline *tl = get_timeline(rq);

				len += snprintf(hdr + len, sizeof(hdr) - len,
						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
						i915_ggtt_offset(rq->ring->vma),
						tl ? tl->hwsp_offset : 0,
						hwsp_seqno(rq));

				if (tl)
					intel_timeline_put(tl);
			}
			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			struct intel_timeline *tl = get_timeline(rq);
			char hdr[80];

			snprintf(hdr, sizeof(hdr),
				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
				 (int)(port - execlists->pending),
				 i915_ggtt_offset(rq->ring->vma),
				 tl ? tl->hwsp_offset : 0,
				 hwsp_seqno(rq));
			print_request(m, rq, hdr);

			if (tl)
				intel_timeline_put(tl);
		}

		execlists_active_unlock_bh(execlists);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
	}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

static unsigned long list_count(struct list_head *list)
{
	struct list_head *pos;
	unsigned long count = 0;

	list_for_each(pos, list)
		count++;

	return count;
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tBarriers?: %s\n",
		   yesno(!llist_empty(&engine->barrier_tasks)));
	drm_printf(m, "\tLatency: %luus\n",
		   ewma__engine_latency_read(&engine->latency));

	rcu_read_lock();
	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	rcu_read_unlock();
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	drm_printf(m, "\tRequests:\n");

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq) {
		struct intel_timeline *tl = get_timeline(rq);

		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);

		if (tl) {
			drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
				   tl->hwsp_offset);
			intel_timeline_put(tl);
		}

		print_request_ring(m, rq);

		if (rq->context->lrc_reg_state) {
			drm_printf(m, "Logical Ring Context:\n");
			hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
		}
	}
	drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
	spin_unlock_irqrestore(&engine->active.lock, flags);

	drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	execlists_active_lock_bh(execlists);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		struct i915_request * const *port;
		struct i915_request *rq;

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		for (port = execlists->active; (rq = *port); port++)
			engine->stats.active++;

		for (port = execlists->pending; (rq = *port); port++) {
			/* Exclude any contexts already counted in active */
			if (!intel_context_inflight_count(rq->context))
				engine->stats.active++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	execlists_active_unlock_bh(execlists);

	return err;
}
1582 ktime_t total = engine->stats.total;
1585 * If the engine is executing something at the moment
1586 * add it to the total.
1588 if (engine->stats.active)
1589 total = ktime_add(total,
1590 ktime_sub(ktime_get(), engine->stats.start));
1596 * intel_engine_get_busy_time() - Return current accumulated engine busyness
1597 * @engine: engine to report on
1599 * Returns accumulated time @engine was busy since engine stats were enabled.
1601 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
1607 seq = read_seqbegin(&engine->stats.lock);
1608 total = __intel_engine_get_busy_time(engine);
1609 } while (read_seqretry(&engine->stats.lock, seq));
1615 * intel_disable_engine_stats() - Disable engine busy tracking on engine
1616 * @engine: engine to disable stats collection
1618 * Stops collecting the engine busyness data for @engine.
1620 void intel_disable_engine_stats(struct intel_engine_cs *engine)
1622 unsigned long flags;
1624 if (!intel_engine_supports_stats(engine))
1627 write_seqlock_irqsave(&engine->stats.lock, flags);
1628 WARN_ON_ONCE(engine->stats.enabled == 0);
1629 if (--engine->stats.enabled == 0) {
1630 engine->stats.total = __intel_engine_get_busy_time(engine);
1631 engine->stats.active = 0;
1633 write_sequnlock_irqrestore(&engine->stats.lock, flags);

static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

	return ring == i915_ggtt_offset(rq->ring->vma);
}

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		if (i915_request_completed(request))
			continue;

		if (!i915_request_started(request))
			continue;

		/* More than one preemptible request may match! */
		if (!match_ring(request))
			continue;

		active = request;
		break;
	}

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif