/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "i915_drv.h"

#include "gt/intel_gt.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS0] = {
		.hw_id = RCS0_HW,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS0] = {
		.hw_id = BCS0_HW,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS0] = {
		.hw_id = VCS0_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS1] = {
		.hw_id = VCS1_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS0] = {
		.hw_id = VECS0_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS1] = {
		.hw_id = VECS1_HW,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = I915_READ(CXT_SIZE) + 1;
			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
					 INTEL_GEN(dev_priv),
					 cxt_size * 64,
					 cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

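/*
 * Select the MMIO base for an engine by walking its mmio_bases table, which
 * is sorted in reverse gen order, and taking the first entry whose minimum
 * gen is not newer than the running device.
 */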
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (INTEL_GEN(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}

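/*
 * Allocate and initialise the software state for one engine, based on its
 * entry in the intel_engines[] table above.
 */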
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	engine->id = id;
	engine->mask = BIT(id);
	engine->i915 = dev_priv;
	engine->gt = &dev_priv->gt;
	engine->uncore = &dev_priv->uncore;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	/*
	 * To be overridden by the backend on setup. However to facilitate
	 * cleanup on error during setup, we always provide the destroy vfunc.
	 */
	engine->destroy = (typeof(engine->destroy))kfree;

	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = intel_engine_context_size(dev_priv,
							 engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;

	return 0;
}

static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (INTEL_GEN(i915) >= 11 ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((INTEL_GEN(i915) >= 11 &&
		     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (INTEL_GEN(i915) >= 9)
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		__setup_engine_capabilities(engine);
}

/**
 * intel_engines_cleanup() - free the resources allocated for Command Streamers
 * @i915: the i915 device
 */
void intel_engines_cleanup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		engine->destroy(engine);
		i915->engine[id] = NULL;
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @i915: the i915 device
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *i915)
{
	struct intel_device_info *device_info = mkwrite_device_info(i915);
	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(engine_mask == 0);
	WARN_ON(engine_mask &
		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_load_failure())
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(i915, i))
			continue;

		err = intel_engine_setup(i915, i);
		if (err)
			goto cleanup;

		mask |= BIT(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != engine_mask))
		device_info->engine_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(i915, RCS0))) {
		err = -ENODEV;
		goto cleanup;
	}

	RUNTIME_INFO(i915)->num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(&i915->gt);

	intel_setup_engine_capabilities(i915);

	return 0;

cleanup:
	intel_engines_cleanup(i915);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @i915: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *i915)
{
	int (*init)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(i915))
		init = intel_execlists_submission_init;
	else
		init = intel_ring_submission_init;

	for_each_engine(engine, i915, id) {
		err = init(engine);
		if (err)
			goto cleanup;
	}

	return 0;

cleanup:
	intel_engines_cleanup(i915);
	return err;
}

static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	execlists->active =
		memset(execlists->inflight, 0, sizeof(execlists->inflight));

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}

static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	return i915_vma_pin(vma, 0, 0, flags);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}

	return 0;

err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int intel_engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	intel_engine_init_active(engine, ENGINE_PHYSICAL);
	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlists(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
	intel_engine_init__pm(engine);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);

	return 0;
}

/**
 * intel_engines_setup() - setup engine state not requiring hw access
 * @i915: Device to setup.
 *
 * Initializes engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
int intel_engines_setup(struct drm_i915_private *i915)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(i915))
		setup = intel_execlists_submission_setup;
	else
		setup = intel_ring_submission_setup;

	for_each_engine(engine, i915, id) {
		err = intel_engine_setup_common(engine);
		if (err)
			goto cleanup;

		err = setup(engine);
		if (err)
			goto cleanup;

		/* We expect the backend to take control over its state */
		GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);

		GEM_BUG_ON(!engine->cops);
	}

	return 0;

cleanup:
	intel_engines_cleanup(i915);
	return err;
}

void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(PREEMPTION, PREEMPTION),
		MAP(SEMAPHORES, SEMAPHORES),
#undef MAP
	};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 enabled, disabled;

	enabled = 0;
	disabled = 0;
	for_each_engine(engine, i915, id) { /* all engines must agree! */
		int i;

		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}

struct measure_breadcrumb {
	struct i915_request rq;
	struct i915_timeline timeline;
	struct intel_ring ring;
	u32 cs[1024];
};

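/*
 * Emit the engine's fini breadcrumb into a scratch frame in order to measure
 * how many dwords it occupies, so that requests can reserve exactly that
 * much space in the ring.
 */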
static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
	struct measure_breadcrumb *frame;
	int dw = -ENOMEM;

	GEM_BUG_ON(!engine->i915->gt.scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	if (i915_timeline_init(&frame->timeline,
			       engine->gt,
			       engine->status_page.vma))
		goto out_frame;

	INIT_LIST_HEAD(&frame->ring.request_list);
	frame->ring.timeline = &frame->timeline;
	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	frame->rq.timeline = &frame->timeline;

	dw = i915_timeline_pin(&frame->timeline);
	if (dw < 0)
		goto out_timeline;

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	i915_timeline_unpin(&frame->timeline);

out_timeline:
	i915_timeline_fini(&frame->timeline);
out_frame:
	kfree(frame);
	return dw;
}

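/*
 * Look up the engine-specific context of a GEM context and pin it, handing
 * the pinned intel_context back to the caller on success.
 */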
static int pin_context(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context **out)
{
	struct intel_context *ce;
	int err;

	ce = i915_gem_context_get_engine(ctx, engine->id);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_pin(ce);
	intel_context_put(ce);
	if (err)
		return err;

	*out = ce;
	return 0;
}

void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
	INIT_LIST_HEAD(&engine->active.requests);

	spin_lock_init(&engine->active.lock);
	lockdep_set_subclass(&engine->active.lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&engine->active.lock.dep_map);
	lock_map_release(&engine->active.lock.dep_map);
	local_irq_enable();
#endif
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	int ret;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = pin_context(i915->kernel_context, engine,
			  &engine->kernel_context);
	if (ret)
		return ret;

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time. However, as preemption
	 * is optional, we allow it to fail.
	 */
	if (i915->preempt_context)
		pin_context(i915->preempt_context, engine,
			    &engine->preempt_context);

	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
		goto err_unpin;

	engine->emit_fini_breadcrumb_dw = ret;

	engine->set_default_submission(engine);

	return 0;

err_unpin:
	if (engine->preempt_context)
		intel_context_unpin(engine->preempt_context);
	intel_context_unpin(engine->kernel_context);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 *                               the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->active.requests));

	cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->preempt_context)
		intel_context_unpin(engine->preempt_context);
	intel_context_unpin(engine->kernel_context);
	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	u64 acthd;

	if (INTEL_GEN(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (INTEL_GEN(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (INTEL_GEN(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(engine->i915) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(uncore,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, 0,
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	GEM_TRACE("%s\n", engine->name);

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 mcr_s_ss_select;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (IS_GEN(dev_priv, 10))
		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
				  GEN8_MCR_SUBSLICE(subslice);
	else if (INTEL_GEN(dev_priv) >= 11)
		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
				  GEN11_MCR_SUBSLICE(subslice);
	else
		mcr_s_ss_select = 0;

	return mcr_s_ss_select;
}

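/*
 * Read a register while steering to a specific slice/subslice through
 * GEN8_MCR_SELECTOR, restoring the default steering before releasing
 * forcewake.
 */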
static u32
read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
		  i915_reg_t reg)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mcr_slice_subslice_mask;
	u32 mcr_slice_subslice_select;
	u32 default_mcr_s_ss_select;
	u32 mcr;
	u32 ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(i915);

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
		     default_mcr_s_ss_select);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	ret = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= default_mcr_s_ss_select;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return ret;
}

/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(i915)) {
	default:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(i915, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	intel_wakeref_t wakeref;
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	/* If the whole device is asleep, the engine must be idle */
	wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm);
	if (!wakeref)
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_reset_failed(engine->i915))
		return true;

	if (!intel_wakeref_active(&engine->wakeref))
		return true;

	/* Waiting to drain ELSP? */
	if (execlists_active(&engine->execlists)) {
		struct tasklet_struct *t = &engine->execlists.tasklet;

		synchronize_hardirq(engine->i915->drm.irq);

		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();

		/* Otherwise flush the tasklet if it was on another cpu */
		tasklet_unlock_wait(t);

		if (execlists_active(&engine->execlists))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	/* Ring stopped? */
	return ring_is_idle(engine);
}

bool intel_engines_are_idle(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_reset_failed(i915))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(i915->gt.awake))
		return true;

	for_each_engine(engine, i915, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
		   prefix,
		   rq->fence.context, rq->fence.seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &rq->fence.flags) ? "+" :
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &rq->fence.flags) ? "-" :
		   "",
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

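/* Dump the engine's MMIO, execlist and CSB state for debugging. */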
static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	unsigned long flags;
	u64 addr;

	if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n",
			   ENGINE_READ(engine, RING_IMR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   num_entries);

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
			   read, write,
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
		while (read < write) {
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		spin_lock_irqsave(&engine->active.lock, flags);
		for (port = execlists->active; (rq = *port); port++) {
			char hdr[80];
			int len;

			len = snprintf(hdr, sizeof(hdr),
				       "\t\tActive[%d: ",
				       (int)(port - execlists->active));
			if (!i915_request_signaled(rq))
				len += snprintf(hdr + len, sizeof(hdr) - len,
						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
						i915_ggtt_offset(rq->ring->vma),
						rq->timeline->hwsp_offset,
						hwsp_seqno(rq));
			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			char hdr[80];

			snprintf(hdr, sizeof(hdr),
				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
				 (int)(port - execlists->pending),
				 i915_ggtt_offset(rq->ring->vma),
				 rq->timeline->hwsp_offset,
				 hwsp_seqno(rq));
			print_request(m, rq, hdr);
		}
		spin_unlock_irqrestore(&engine->active.lock, flags);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
	}
}

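/*
 * Copy the portion of the ring occupied by the request and hexdump it,
 * wrapping around the end of the ring buffer if necessary.
 */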
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);

		kfree(ring);
	}
}

void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_reset_failed(engine->i915))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
	drm_printf(m, "\tHangcheck: %d ms ago\n",
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = intel_engine_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);
		drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
			   rq->timeline->hwsp_offset);

		print_request_ring(m, rq);
	}

	rcu_read_unlock();

	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	spin_lock_irqsave(&engine->active.lock, flags);
	write_seqlock(&engine->stats.lock);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		struct i915_request * const *port;
		struct i915_request *rq;

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		for (port = execlists->active; (rq = *port); port++)
			engine->stats.active++;

		for (port = execlists->pending; (rq = *port); port++) {
			/* Exclude any contexts already counted in active */
			if (intel_context_inflight_count(rq->hw_context) == 1)
				engine->stats.active++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock(&engine->stats.lock);
	spin_unlock_irqrestore(&engine->active.lock, flags);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

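/*
 * Check whether the request's ring is the one currently programmed into
 * RING_START on its engine.
 */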
static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

	return ring == i915_ggtt_offset(rq->ring->vma);
}

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;
	unsigned long flags;

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		if (i915_request_completed(request))
			continue;

		if (!i915_request_started(request))
			break;

		/* More than one preemptible request may match! */
		if (!match_ring(request))
			break;

		active = request;
		break;
	}
	spin_unlock_irqrestore(&engine->active.lock, flags);

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_cs.c"
#endif