/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
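/*
 * Quick sanity check of the arithmetic above (assuming 4KiB pages):
 * 66944 bytes / 4096 bytes per page = ~16.3 pages, which rounds up to 17.
 */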
#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);
	u8 uabi_class;
};
static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};
#define MAX_MMIO_BASES 3

	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen;
		u32 base;
	} mmio_bases[MAX_MMIO_BASES];
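	/*
	 * Note on the ordering requirement above: __engine_mmio_base() walks
	 * mmio_bases[] from the first entry and stops at the first slot whose
	 * .gen is less than or equal to the running device's gen, so listing
	 * the newest base first guarantees the most specific match wins.
	 */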
static const struct engine_info intel_engines[] = {
		.class = RENDER_CLASS,
			{ .gen = 1, .base = RENDER_RING_BASE }

		.class = COPY_ENGINE_CLASS,
			{ .gen = 6, .base = BLT_RING_BASE }

		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }

		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }

		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }

		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }

		.class = VIDEO_ENHANCEMENT_CLASS,
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }

		.class = VIDEO_ENHANCEMENT_CLASS,
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
};
/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;
	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (INTEL_GEN(dev_priv)) {
	default:
		MISSING_CASE(INTEL_GEN(dev_priv));
		return DEFAULT_LR_CONTEXT_RENDER_SIZE;
	case 11:
		return GEN11_LR_CONTEXT_RENDER_SIZE;
	case 10:
		return GEN10_LR_CONTEXT_RENDER_SIZE;
	case 9:
		return GEN9_LR_CONTEXT_RENDER_SIZE;
	case 8:
		return GEN8_LR_CONTEXT_RENDER_SIZE;
	case 7:
		if (IS_HASWELL(dev_priv))
			return HSW_CXT_TOTAL_SIZE;

		cxt_size = I915_READ(GEN7_CXT_SIZE);
		return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
				PAGE_SIZE);
	case 6:
		cxt_size = I915_READ(CXT_SIZE);
		return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
				PAGE_SIZE);
	/* For the special day when i810 gets merged. */
	}

	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
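/*
 * Worked example (based on the VCS0 table above): on a gen9 part the
 * entries { .gen = 11 }, { .gen = 6 }, { .gen = 4 } are scanned in order
 * and the first match is .gen = 6, so GEN6_BSD_RING_BASE is returned.
 */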
static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
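/*
 * For example, a class named "rcs" with instance 0 is composed into "rcs0";
 * the WARN_ON() above fires if the composed name would have been truncated.
 */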
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t hwstam;

	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
		return;

	hwstam = RING_HWSTAM(engine->mmio_base);
	if (INTEL_GEN(dev_priv) >= 3)
		I915_WRITE(hwstam, mask);
	else
		I915_WRITE16(hwstam, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;
	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;
	if (GEM_DEBUG_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	engine->id = id;
	engine->mask = BIT(id);
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							    engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;

	return 0;
}
/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(engine_mask == 0);
	WARN_ON(engine_mask &
		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_load_failure())
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;
		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;
		mask |= BIT(i);
	}

	/*
	 * Catch failures to update the intel_engines table when new engines
	 * are added to the driver: warn and disable the forgotten engines.
	 */
	if (WARN_ON(mask != engine_mask))
		device_info->engine_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
		err = -ENODEV;
		goto cleanup;
	}

	RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		if (GEM_DEBUG_WARN_ON(!init))

		GEM_BUG_ON(!engine->submit_request);
	}

	for_each_engine(engine, dev_priv, id) {

		dev_priv->engine[id] = NULL;

		dev_priv->gt.cleanup_engine(engine);
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}
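/*
 * Note on intel_engine_init_execlist() above: port_mask is the number of
 * ELSP ports minus one, so a port_mask of 1 means execlists_num_ports()
 * reports two ports - hence the power-of-two and EXECLIST_MAX_PORTS checks.
 */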
static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;
	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	__i915_gem_object_release_unless_active(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually use it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	return i915_vma_pin(vma, 0, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;
	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}
	return 0;
err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
int intel_engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	err = init_status_page(engine);
	if (err)
		return err;

	err = i915_timeline_init(engine->i915,
				 &engine->timeline,
				 engine->status_page.vma);
	if (err)
		goto err_hwsp;

	i915_timeline_set_subclass(&engine->timeline, TIMELINE_ENGINE);

	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);

	return 0;

err_hwsp:
	cleanup_status_page(engine);
	return err;
}
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
	static const struct {
		u8 engine;
		u8 sched;
	} map[] = {
#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
		MAP(PREEMPTION, PREEMPTION),
		MAP(SEMAPHORES, SEMAPHORES),
#undef MAP
	};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 enabled, disabled;
	int i;

	enabled = 0;
	disabled = 0;
	for_each_engine(engine, i915, id) { /* all engines must agree! */
		if (engine->schedule)
			enabled |= (I915_SCHEDULER_CAP_ENABLED |
				    I915_SCHEDULER_CAP_PRIORITY);
		else
			disabled |= (I915_SCHEDULER_CAP_ENABLED |
				     I915_SCHEDULER_CAP_PRIORITY);

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (engine->flags & BIT(map[i].engine))
				enabled |= BIT(map[i].sched);
			else
				disabled |= BIT(map[i].sched);
		}
	}

	i915->caps.scheduler = enabled & ~disabled;
	if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
		i915->caps.scheduler = 0;
}
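/*
 * In other words, a scheduler capability is only advertised in
 * caps.scheduler if every engine sets the corresponding flag; a single
 * engine lacking it moves the bit into "disabled" and masks it out.
 */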
struct measure_breadcrumb {
	struct i915_request rq;
	struct i915_timeline timeline;
	struct intel_ring ring;
};
static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
	struct measure_breadcrumb *frame;
	int dw;

	GEM_BUG_ON(!engine->i915->gt.scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	if (i915_timeline_init(engine->i915,
			       &frame->timeline,
			       engine->status_page.vma))

	INIT_LIST_HEAD(&frame->ring.request_list);
	frame->ring.timeline = &frame->timeline;
	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	frame->rq.timeline = &frame->timeline;

	dw = i915_timeline_pin(&frame->timeline);

	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;

	i915_timeline_unpin(&frame->timeline);

	i915_timeline_fini(&frame->timeline);
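/*
 * The dance above exists purely to measure, in dwords, how much ring space
 * engine->emit_fini_breadcrumb() will need: the breadcrumb is emitted into a
 * throwaway frame and only the returned length is kept by the caller.
 */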
static int pin_context(struct i915_gem_context *ctx,
		       struct intel_engine_cs *engine,
		       struct intel_context **out)
{
	struct intel_context *ce;

	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	*out = ce;
	return 0;
}
/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	int ret;

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ret = pin_context(i915->kernel_context, engine,
			  &engine->kernel_context);
	if (ret)
		return ret;

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time. However, as preemption
	 * is optional, we allow it to fail.
	 */
	if (i915->preempt_context)
		pin_context(i915->preempt_context, engine,
			    &engine->preempt_context);

	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
		goto err_unpin;

	engine->emit_fini_breadcrumb_dw = ret;
	engine->set_default_submission(engine);

	return 0;

err_unpin:
	if (engine->preempt_context)
		intel_context_unpin(engine->preempt_context);
	intel_context_unpin(engine->kernel_context);
	return ret;
}
/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->preempt_context)
		intel_context_unpin(engine->preempt_context);
	intel_context_unpin(engine->kernel_context);

	i915_timeline_fini(&engine->timeline);

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
	return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = &engine->i915->uncore;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err = 0;

	if (INTEL_GEN(engine->i915) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	if (__intel_wait_for_register_fw(uncore,
					 mode, MODE_IDLE, MODE_IDLE,
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);

	return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
		      _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
	u32 mcr_s_ss_select;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (IS_GEN(dev_priv, 10))
		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
				  GEN8_MCR_SUBSLICE(subslice);
	else if (INTEL_GEN(dev_priv) >= 11)
		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
				  GEN11_MCR_SUBSLICE(subslice);
	else
		mcr_s_ss_select = 0;

	return mcr_s_ss_select;
}
static u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mcr_slice_subslice_mask;
	u32 mcr_slice_subslice_select;
	u32 default_mcr_s_ss_select;
	u32 mcr;
	u32 ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
		     default_mcr_s_ss_select);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	ret = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= default_mcr_s_ss_select;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return ret;
}
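/*
 * Summary of read_subslice_reg() above: save the current MCR steering,
 * steer reads to the requested slice/subslice, read the target register,
 * then restore the default steering - all under the uncore lock with
 * forcewake held so the selector cannot change beneath us.
 */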
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
		if (engine->id != RCS0)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
		if (engine->id != RCS0)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	intel_wakeref_t wakeref;
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	/* If the whole device is asleep, the engine must be idle */
	wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
	if (!wakeref)
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv, wakeref);
	return idle;
}
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_reset_failed(engine->i915))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active)) {
		struct tasklet_struct *t = &engine->execlists.tasklet;

		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}

		/* Otherwise flush the tasklet if it was on another cpu */
		tasklet_unlock_wait(t);

		if (READ_ONCE(engine->execlists.active))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_reset_failed(i915))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(i915->gt.awake))
		return true;

	for_each_engine(engine, i915, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}
static bool reset_engines(struct drm_i915_private *i915)
{
	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		return false;

	return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}
/**
 * intel_engines_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_engines_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (!reset_engines(i915) && !force)
		return;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, false);
}
/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);

		if (engine->park)
			engine->park(engine);

		if (engine->pinned_default_state) {
			i915_gem_object_unpin_map(engine->default_state);
			engine->pinned_default_state = NULL;
		}

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}

	i915->gt.active_engines = 0;
}
/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		void *map;

		/* Pin the default state for fast resets from atomic context. */
		map = NULL;
		if (engine->default_state)
			map = i915_gem_object_pin_map(engine->default_state,
						      I915_MAP_WB);
		if (!IS_ERR_OR_NULL(map))
			engine->pinned_default_state = map;

		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}
/**
 * intel_engine_lost_context: called when the GPU is reset into unknown state
 * @engine: the engine
 *
 * We have either reset the GPU or are otherwise about to lose state tracking
 * of the current GPU logical state (e.g. suspend). On next use, it is
 * therefore imperative that we make no presumptions about the current state
 * and instead reload it from scratch.
 */
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
}
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which = 0;

	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}
static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);
	return x;
}
static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
		   prefix,
		   rq->fence.context, rq->fence.seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &rq->fence.flags) ? "+" : "",
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			drm_printf(m, "*\n");
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);
	}
}
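/*
 * Like hexdump(1), rows identical to the previously printed row are
 * collapsed into a lone "*" marker instead of being printed again.
 */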
static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
			   read, write,
			   GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
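		/*
		 * read/write are indices into the GEN8_CSB_ENTRIES-deep CSB
		 * ring; after normalising both into range, bumping write by
		 * GEN8_CSB_ENTRIES lets the loop below walk the wrapped
		 * interval with a simple "++read % GEN8_CSB_ENTRIES".
		 */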
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
				   idx,
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2 + 1],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
		}

		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma),
					 rq->timeline->hwsp_offset,
					 hwsp_seqno(rq));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}
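/*
 * Note the two-step copy above: if the request wraps the ring buffer, the
 * head-to-end chunk is copied first and the remainder from the start of the
 * ring, so the hexdump sees one linear blob.
 */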
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_reset_failed(engine->i915))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
		   engine->hangcheck.last_seqno,
		   engine->hangcheck.next_seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast ");

	rq = intel_engine_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);
		drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
			   rq->timeline->hwsp_offset);

		print_request_ring(m, rq);
	}

	wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}
static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};
struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	spin_lock_irqsave(&engine->timeline.lock, flags);
	write_seqlock(&engine->stats.lock);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock(&engine->stats.lock);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	return err;
}
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}
/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}
/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
static bool match_ring(struct i915_request *rq)
{
	struct drm_i915_private *dev_priv = rq->i915;
	u32 ring = I915_READ(RING_START(rq->engine->mmio_base));

	return ring == i915_ggtt_offset(rq->ring->vma);
}
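/*
 * In other words, a request "matches" only if the hardware RING_START
 * register currently points at that request's ring buffer - a cheap way to
 * tell which of several runnable contexts the engine is actually executing.
 */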
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;
	unsigned long flags;

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	spin_lock_irqsave(&engine->timeline.lock, flags);
	list_for_each_entry(request, &engine->timeline.requests, link) {
		if (i915_request_completed(request))
			continue;

		if (!i915_request_started(request))
			break;

		/* More than one preemptible request may match! */
		if (!match_ring(request))
			break;

		active = request;
		break;
	}
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	return active;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif