1 /*
2 * Copyright © 2008-2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
27 *
28 */
30 #include <linux/log2.h>
32 #include <drm/i915_drm.h>
34 #include "gem/i915_gem_context.h"
37 #include "i915_trace.h"
38 #include "intel_context.h"
40 #include "intel_gt_irq.h"
41 #include "intel_gt_pm_irq.h"
42 #include "intel_reset.h"
43 #include "intel_ring.h"
44 #include "intel_workarounds.h"
46 /* Rough estimate of the typical request size, performing a flush,
47 * set-context and then emitting the batch.
49 #define LEGACY_REQUEST_SIZE 200
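/*
 * Illustrative sketch of how this estimate is consumed, grounded in
 * ring_request_alloc() further below: each request reserves this much extra
 * ring space up front so that the final flush and breadcrumb always fit, and
 * releases it once the request body has been built:
 *
 *	request->reserved_space += LEGACY_REQUEST_SIZE;
 *	... emit the invalidate, context switch and batch ...
 *	request->reserved_space -= LEGACY_REQUEST_SIZE;
 */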
52 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
54 unsigned int num_store_dw;
59 if (mode & EMIT_INVALIDATE)
61 if (mode & EMIT_FLUSH)
64 cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
69 while (num_store_dw--) {
70 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
71 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
72 INTEL_GT_SCRATCH_FIELD_DEFAULT);
75 *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
77 intel_ring_advance(rq, cs);
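/*
 * Illustrative sketch (an assumption; the bodies of the EMIT_INVALIDATE and
 * EMIT_FLUSH tests above are elided here): gen2_render_ring_flush()
 * presumably derives its command word and scratch store count from the mode
 * bits along the lines of:
 *
 *	cmd = MI_FLUSH;
 *	num_store_dw = 0;
 *	if (mode & EMIT_INVALIDATE)
 *		cmd |= MI_EXE_FLUSH;
 *	if (mode & EMIT_FLUSH)
 *		num_store_dw = 4;
 *
 * which matches the 2 + 3 * num_store_dw ring allocation used above.
 */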
83 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
91 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
92 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
93 * also flushed at 2d versus 3d pipeline switches.
97 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
98 * MI_READ_FLUSH is set, and is always flushed on 965.
100 * I915_GEM_DOMAIN_COMMAND may not exist?
102 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
103 * invalidated when MI_EXE_FLUSH is set.
105 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
106 * invalidated with every MI_FLUSH.
110 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
111 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
112 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
113 * are flushed at any MI_FLUSH.
117 if (mode & EMIT_INVALIDATE) {
119 if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
120 cmd |= MI_INVALIDATE_ISP;
124 if (mode & EMIT_INVALIDATE)
127 cs = intel_ring_begin(rq, i);
134 * A random delay to let the CS invalidate take effect? Without this
135 * delay, the GPU relocation path fails as the CS does not see
136 * the updated contents. Just as important, if we apply the flushes
137 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
138 * write and before the invalidate on the next batch), the relocations
139 * still fail. This implies that there is a delay following invalidation
140 * that is required to reset the caches as opposed to a delay to
141 * ensure the memory is written.
143 if (mode & EMIT_INVALIDATE) {
144 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
145 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
146 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
147 PIPE_CONTROL_GLOBAL_GTT;
151 for (i = 0; i < 12; i++)
154 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
155 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
156 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
157 PIPE_CONTROL_GLOBAL_GTT;
164 intel_ring_advance(rq, cs);
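/*
 * Illustrative sketch (an assumption; the exact dword counts are not shown
 * above): gen4_render_ring_flush() builds its MI_FLUSH command word and the
 * ring allocation before the invalidate-delay sequence emitted above,
 * roughly:
 *
 *	cmd = MI_FLUSH;
 *	if (mode & EMIT_INVALIDATE)
 *		cmd |= MI_EXE_FLUSH;	(plus MI_INVALIDATE_ISP on g4x/gen5)
 *
 *	i = 2;
 *	if (mode & EMIT_INVALIDATE)
 *		i += 20;	(two PIPE_CONTROL qword writes + 12 MI_FLUSHes)
 *
 *	cs = intel_ring_begin(rq, i);
 */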
170 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
171 * implementing two workarounds on gen6. From section 1.4.7.1
172 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
174 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
175 * produced by non-pipelined state commands), software needs to first
176 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
179 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
180 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
182 * And the workaround for these two requires this workaround first:
184 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
185 * BEFORE the pipe-control with a post-sync op and no write-cache
188 * And this last workaround is tricky because of the requirements on
189 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
192 * "1 of the following must also be set:
193 * - Render Target Cache Flush Enable ([12] of DW1)
194 * - Depth Cache Flush Enable ([0] of DW1)
195 * - Stall at Pixel Scoreboard ([1] of DW1)
196 * - Depth Stall ([13] of DW1)
197 * - Post-Sync Operation ([13] of DW1)
198 * - Notify Enable ([8] of DW1)"
200 * The cache flushes require the workaround flush that triggered this
201 * one, so we can't use it. Depth stall would trigger the same.
202 * Post-sync nonzero is what triggered this second workaround, so we
203 * can't use that one either. Notify enable is IRQs, which aren't
204 * really our business. That leaves only stall at scoreboard.
207 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
210 intel_gt_scratch_offset(rq->engine->gt,
211 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
214 cs = intel_ring_begin(rq, 6);
218 *cs++ = GFX_OP_PIPE_CONTROL(5);
219 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
220 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
221 *cs++ = 0; /* low dword */
222 *cs++ = 0; /* high dword */
224 intel_ring_advance(rq, cs);
226 cs = intel_ring_begin(rq, 6);
230 *cs++ = GFX_OP_PIPE_CONTROL(5);
231 *cs++ = PIPE_CONTROL_QW_WRITE;
232 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
236 intel_ring_advance(rq, cs);
242 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
245 intel_gt_scratch_offset(rq->engine->gt,
246 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
250 /* Force SNB workarounds for PIPE_CONTROL flushes */
251 ret = gen6_emit_post_sync_nonzero_flush(rq);
255 /* Just flush everything. Experiments have shown that reducing the
256 * number of bits based on the write domains has little performance
257 * impact.
258 */
259 if (mode & EMIT_FLUSH) {
260 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
261 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
263 * Ensure that any following seqno writes only happen
264 * when the render cache is indeed flushed.
266 flags |= PIPE_CONTROL_CS_STALL;
268 if (mode & EMIT_INVALIDATE) {
269 flags |= PIPE_CONTROL_TLB_INVALIDATE;
270 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
271 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
272 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
273 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
274 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
276 * TLB invalidate requires a post-sync write.
278 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
281 cs = intel_ring_begin(rq, 4);
285 *cs++ = GFX_OP_PIPE_CONTROL(4);
287 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
289 intel_ring_advance(rq, cs);
294 static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
296 /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
297 *cs++ = GFX_OP_PIPE_CONTROL(4);
298 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
302 *cs++ = GFX_OP_PIPE_CONTROL(4);
303 *cs++ = PIPE_CONTROL_QW_WRITE;
304 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
305 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
306 PIPE_CONTROL_GLOBAL_GTT;
309 /* Finally we can flush and with it emit the breadcrumb */
310 *cs++ = GFX_OP_PIPE_CONTROL(4);
311 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
312 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
313 PIPE_CONTROL_DC_FLUSH_ENABLE |
314 PIPE_CONTROL_QW_WRITE |
315 PIPE_CONTROL_CS_STALL);
316 *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
317 PIPE_CONTROL_GLOBAL_GTT;
318 *cs++ = rq->fence.seqno;
320 *cs++ = MI_USER_INTERRUPT;
323 rq->tail = intel_ring_offset(rq, cs);
324 assert_ring_tail_valid(rq->ring, rq->tail);
330 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
334 cs = intel_ring_begin(rq, 4);
338 *cs++ = GFX_OP_PIPE_CONTROL(4);
339 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
342 intel_ring_advance(rq, cs);
348 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
351 intel_gt_scratch_offset(rq->engine->gt,
352 INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
356 * Ensure that any following seqno writes only happen when the render
357 * cache is indeed flushed.
359 * Workaround: 4th PIPE_CONTROL command (except the ones with only
360 * read-cache invalidate bits set) must have the CS_STALL bit set. We
361 * don't try to be clever and just set it unconditionally.
363 flags |= PIPE_CONTROL_CS_STALL;
365 /* Just flush everything. Experiments have shown that reducing the
366 * number of bits based on the write domains has little performance
367 * impact.
368 */
369 if (mode & EMIT_FLUSH) {
370 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
371 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
372 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
373 flags |= PIPE_CONTROL_FLUSH_ENABLE;
375 if (mode & EMIT_INVALIDATE) {
376 flags |= PIPE_CONTROL_TLB_INVALIDATE;
377 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
378 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
379 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
380 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
381 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
382 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
384 * TLB invalidate requires a post-sync write.
386 flags |= PIPE_CONTROL_QW_WRITE;
387 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
389 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
391 /* Workaround: we must issue a pipe_control with CS-stall bit
392 * set before a pipe_control command that has the state cache
393 * invalidate bit set. */
394 gen7_render_ring_cs_stall_wa(rq);
397 cs = intel_ring_begin(rq, 4);
401 *cs++ = GFX_OP_PIPE_CONTROL(4);
403 *cs++ = scratch_addr;
405 intel_ring_advance(rq, cs);
410 static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
412 *cs++ = GFX_OP_PIPE_CONTROL(4);
413 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
414 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
415 PIPE_CONTROL_DC_FLUSH_ENABLE |
416 PIPE_CONTROL_FLUSH_ENABLE |
417 PIPE_CONTROL_QW_WRITE |
418 PIPE_CONTROL_GLOBAL_GTT_IVB |
419 PIPE_CONTROL_CS_STALL);
420 *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
421 *cs++ = rq->fence.seqno;
423 *cs++ = MI_USER_INTERRUPT;
426 rq->tail = intel_ring_offset(rq, cs);
427 assert_ring_tail_valid(rq->ring, rq->tail);
432 static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
434 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
435 GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
437 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
438 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
439 *cs++ = rq->fence.seqno;
441 *cs++ = MI_USER_INTERRUPT;
443 rq->tail = intel_ring_offset(rq, cs);
444 assert_ring_tail_valid(rq->ring, rq->tail);
449 #define GEN7_XCS_WA 32
450 static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
454 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
455 GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
457 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
458 *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
459 *cs++ = rq->fence.seqno;
461 for (i = 0; i < GEN7_XCS_WA; i++) {
462 *cs++ = MI_STORE_DWORD_INDEX;
463 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
464 *cs++ = rq->fence.seqno;
471 *cs++ = MI_USER_INTERRUPT;
474 rq->tail = intel_ring_offset(rq, cs);
475 assert_ring_tail_valid(rq->ring, rq->tail);
481 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
484 * Keep the render interrupt unmasked as this papers over
485 * lost interrupts following a reset.
487 if (engine->class == RENDER_CLASS) {
488 if (INTEL_GEN(engine->i915) >= 6)
491 mask &= ~I915_USER_INTERRUPT;
494 intel_engine_set_hwsp_writemask(engine, mask);
497 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
499 struct drm_i915_private *dev_priv = engine->i915;
502 addr = lower_32_bits(phys);
503 if (INTEL_GEN(dev_priv) >= 4)
504 addr |= (phys >> 28) & 0xf0;
506 I915_WRITE(HWS_PGA, addr);
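/*
 * Worked example (illustration only): with a status page at physical
 * address 0x234567000 on gen4+, lower_32_bits() yields 0x34567000 and
 * (phys >> 28) & 0xf0 folds address bits [35:32] (here 0x2) into bits
 * [7:4], so HWS_PGA is written with 0x34567020. The low bits of the
 * page-aligned address are zero, so the extra nibble does not collide
 * with the address field.
 *
 * A hypothetical helper expressing the same packing (not part of the
 * driver, shown only for clarity):
 */
static inline u32 hws_pga_pack(phys_addr_t phys)
{
	/* low 32 bits of the page address, plus bits 35:32 in bits 7:4 */
	return lower_32_bits(phys) | ((phys >> 28) & 0xf0);
}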
509 static struct page *status_page(struct intel_engine_cs *engine)
511 struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
513 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
514 return sg_page(obj->mm.pages->sgl);
517 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
519 set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
520 set_hwstam(engine, ~0u);
523 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
525 struct drm_i915_private *dev_priv = engine->i915;
529 * The ring status page addresses are no longer next to the rest of
530 * the ring registers as of gen7.
532 if (IS_GEN(dev_priv, 7)) {
533 switch (engine->id) {
535 * No more rings exist on Gen7. Default case is only to shut up
536 * gcc switch check warning.
539 GEM_BUG_ON(engine->id);
542 hwsp = RENDER_HWS_PGA_GEN7;
545 hwsp = BLT_HWS_PGA_GEN7;
548 hwsp = BSD_HWS_PGA_GEN7;
551 hwsp = VEBOX_HWS_PGA_GEN7;
554 } else if (IS_GEN(dev_priv, 6)) {
555 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
557 hwsp = RING_HWS_PGA(engine->mmio_base);
560 I915_WRITE(hwsp, offset);
564 static void flush_cs_tlb(struct intel_engine_cs *engine)
566 struct drm_i915_private *dev_priv = engine->i915;
568 if (!IS_GEN_RANGE(dev_priv, 6, 7))
571 /* ring should be idle before issuing a sync flush */
572 WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
574 ENGINE_WRITE(engine, RING_INSTPM,
575 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
577 if (intel_wait_for_register(engine->uncore,
578 RING_INSTPM(engine->mmio_base),
579 INSTPM_SYNC_FLUSH, 0,
581 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
585 static void ring_setup_status_page(struct intel_engine_cs *engine)
587 set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
588 set_hwstam(engine, ~0u);
590 flush_cs_tlb(engine);
593 static bool stop_ring(struct intel_engine_cs *engine)
595 struct drm_i915_private *dev_priv = engine->i915;
597 if (INTEL_GEN(dev_priv) > 2) {
599 RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
600 if (intel_wait_for_register(engine->uncore,
601 RING_MI_MODE(engine->mmio_base),
605 DRM_ERROR("%s : timed out trying to stop ring\n",
609 * Sometimes we observe that the idle flag is not
610 * set even though the ring is empty. So double
611 * check before giving up.
613 if (ENGINE_READ(engine, RING_HEAD) !=
614 ENGINE_READ(engine, RING_TAIL))
619 ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
621 ENGINE_WRITE(engine, RING_HEAD, 0);
622 ENGINE_WRITE(engine, RING_TAIL, 0);
624 /* The ring must be empty before it is disabled */
625 ENGINE_WRITE(engine, RING_CTL, 0);
627 return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
630 static int xcs_resume(struct intel_engine_cs *engine)
632 struct drm_i915_private *dev_priv = engine->i915;
633 struct intel_ring *ring = engine->legacy.ring;
636 GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
637 engine->name, ring->head, ring->tail);
639 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
641 /* WaClearRingBufHeadRegAtInit:ctg,elk */
642 if (!stop_ring(engine)) {
643 /* G45 ring initialization often fails to reset head to zero */
644 DRM_DEBUG_DRIVER("%s head not reset to zero "
645 "ctl %08x head %08x tail %08x start %08x\n",
647 ENGINE_READ(engine, RING_CTL),
648 ENGINE_READ(engine, RING_HEAD),
649 ENGINE_READ(engine, RING_TAIL),
650 ENGINE_READ(engine, RING_START));
652 if (!stop_ring(engine)) {
653 DRM_ERROR("failed to set %s head to zero "
654 "ctl %08x head %08x tail %08x start %08x\n",
656 ENGINE_READ(engine, RING_CTL),
657 ENGINE_READ(engine, RING_HEAD),
658 ENGINE_READ(engine, RING_TAIL),
659 ENGINE_READ(engine, RING_START));
665 if (HWS_NEEDS_PHYSICAL(dev_priv))
666 ring_setup_phys_status_page(engine);
668 ring_setup_status_page(engine);
670 intel_engine_reset_breadcrumbs(engine);
672 /* Enforce ordering by reading HEAD register back */
673 ENGINE_POSTING_READ(engine, RING_HEAD);
675 /*
676 * Initialize the ring. This must happen _after_ we've cleared the ring
677 * registers with the above sequence (the readback of the HEAD registers
678 * also enforces ordering), otherwise the hw might lose the new ring
679 * register values.
680 */
681 ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
683 /* Check that the ring offsets point within the ring! */
684 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
685 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
686 intel_ring_update_space(ring);
688 /* First wake the ring up to an empty/idle ring */
689 ENGINE_WRITE(engine, RING_HEAD, ring->head);
690 ENGINE_WRITE(engine, RING_TAIL, ring->head);
691 ENGINE_POSTING_READ(engine, RING_TAIL);
693 ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
695 /* If the head is still not zero, the ring is dead */
696 if (intel_wait_for_register(engine->uncore,
697 RING_CTL(engine->mmio_base),
698 RING_VALID, RING_VALID,
700 DRM_ERROR("%s initialization failed "
701 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
703 ENGINE_READ(engine, RING_CTL),
704 ENGINE_READ(engine, RING_CTL) & RING_VALID,
705 ENGINE_READ(engine, RING_HEAD), ring->head,
706 ENGINE_READ(engine, RING_TAIL), ring->tail,
707 ENGINE_READ(engine, RING_START),
708 i915_ggtt_offset(ring->vma));
713 if (INTEL_GEN(dev_priv) > 2)
715 RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
717 /* Now awake, let it get started */
718 if (ring->tail != ring->head) {
719 ENGINE_WRITE(engine, RING_TAIL, ring->tail);
720 ENGINE_POSTING_READ(engine, RING_TAIL);
723 /* Papering over lost _interrupts_ immediately following the restart */
724 intel_engine_queue_breadcrumbs(engine);
726 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
731 static void reset_prepare(struct intel_engine_cs *engine)
733 struct intel_uncore *uncore = engine->uncore;
734 const u32 base = engine->mmio_base;
737 * We stop engines, otherwise we might get a failed reset and a
738 * dead gpu (on elk). Also, even a modern gpu such as kbl can suffer
739 * from a system hang if a batchbuffer is progressing when
740 * the reset is issued, regardless of READY_TO_RESET ack.
741 * Thus assume it is best to stop engines on all gens
742 * where we have a gpu reset.
744 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
746 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
748 * FIXME: Wa for more modern gens needs to be validated
750 GEM_TRACE("%s\n", engine->name);
752 if (intel_engine_stop_cs(engine))
753 GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
755 intel_uncore_write_fw(uncore,
757 intel_uncore_read_fw(uncore, RING_TAIL(base)));
758 intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
760 intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
761 intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
762 intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
764 /* The ring must be empty before it is disabled */
765 intel_uncore_write_fw(uncore, RING_CTL(base), 0);
767 /* Check acts as a post */
768 if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
769 GEM_TRACE("%s: ring head [%x] not parked\n",
771 intel_uncore_read_fw(uncore, RING_HEAD(base)));
774 static void reset_ring(struct intel_engine_cs *engine, bool stalled)
776 struct i915_request *pos, *rq;
781 spin_lock_irqsave(&engine->active.lock, flags);
782 list_for_each_entry(pos, &engine->active.requests, sched.link) {
783 if (!i915_request_completed(pos)) {
790 * The guilty request will get skipped on a hung engine.
792 * Users of client default contexts do not rely on logical
793 * state preserved between batches so it is safe to execute
794 * queued requests following the hang. Non default contexts
795 * rely on preserved state, so skipping a batch loses the
796 * evolution of the state and it needs to be considered corrupted.
797 * Executing more queued batches on top of corrupted state is
798 * risky. But we take the risk by trying to advance through
799 * the queued requests in order to make the client behaviour
800 * more predictable around resets, by not throwing away a random
801 * amount of batches it has prepared for execution. Sophisticated
802 * clients can use gem_reset_stats_ioctl and dma fence status
803 * (exported via sync_file info ioctl on explicit fences) to observe
804 * when they lose the context state and should rebuild accordingly.
805 *
806 * The context ban, and ultimately the client ban, mechanism are safety
807 * valves if client submission ends up resulting in nothing more than
808 * subsequent hangs.
813 * Try to restore the logical GPU state to match the
814 * continuation of the request queue. If we skip the
815 * context/PD restore, then the next request may try to execute
816 * assuming that its context is valid and loaded on the GPU and
817 * so may try to access invalid memory, prompting repeated GPU
818 * hangs.
819 *
820 * If the request was guilty, we still restore the logical
821 * state in case the next request requires it (e.g. the
822 * aliasing ppgtt), but skip over the hung batch.
824 * If the request was innocent, we try to replay the request
825 * with the restored context.
827 __i915_request_reset(rq, stalled);
829 GEM_BUG_ON(rq->ring != engine->legacy.ring);
832 head = engine->legacy.ring->tail;
834 engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
836 spin_unlock_irqrestore(&engine->active.lock, flags);
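/*
 * Sketch (assumption) of the control flow elided around the lines above:
 * the list walk records the first incomplete request, and the rewind point
 * is taken from it, falling back to the current tail when every request has
 * already completed:
 *
 *	rq = NULL;
 *	list_for_each_entry(pos, &engine->active.requests, sched.link) {
 *		if (!i915_request_completed(pos)) {
 *			rq = pos;
 *			break;
 *		}
 *	}
 *
 *	if (rq) {
 *		__i915_request_reset(rq, stalled);
 *		head = rq->head;
 *	} else {
 *		head = engine->legacy.ring->tail;
 *	}
 *	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 */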
839 static void reset_finish(struct intel_engine_cs *engine)
843 static int rcs_resume(struct intel_engine_cs *engine)
845 struct drm_i915_private *dev_priv = engine->i915;
847 /*
848 * Disable CONSTANT_BUFFER before it is loaded from the context
849 * image. As soon as it is loaded, it is executed and the stored
850 * address may no longer be valid, leading to a GPU hang.
851 *
852 * This imposes the requirement that userspace reload their
853 * CONSTANT_BUFFER on every batch, fortunately a requirement
854 * they are already accustomed to from before contexts were
855 * enabled.
856 */
857 if (IS_GEN(dev_priv, 4))
859 _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
861 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
862 if (IS_GEN_RANGE(dev_priv, 4, 6))
863 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
865 /* We need to disable the AsyncFlip performance optimisations in order
866 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
867 * programmed to '1' on all products.
869 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
871 if (IS_GEN_RANGE(dev_priv, 6, 7))
872 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
874 /* Required for the hardware to program scanline values for waiting */
875 /* WaEnableFlushTlbInvalidationMode:snb */
876 if (IS_GEN(dev_priv, 6))
878 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
880 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
881 if (IS_GEN(dev_priv, 7))
882 I915_WRITE(GFX_MODE_GEN7,
883 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
884 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
886 if (IS_GEN(dev_priv, 6)) {
887 /* From the Sandybridge PRM, volume 1 part 3, page 24:
888 * "If this bit is set, STCunit will have LRA as replacement
889 * policy. [...] This bit must be reset. LRA replacement
890 * policy is not supported."
892 I915_WRITE(CACHE_MODE_0,
893 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
896 if (IS_GEN_RANGE(dev_priv, 6, 7))
897 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
899 return xcs_resume(engine);
902 static void cancel_requests(struct intel_engine_cs *engine)
904 struct i915_request *request;
907 spin_lock_irqsave(&engine->active.lock, flags);
909 /* Mark all submitted requests as skipped. */
910 list_for_each_entry(request, &engine->active.requests, sched.link) {
911 if (!i915_request_signaled(request))
912 dma_fence_set_error(&request->fence, -EIO);
914 i915_request_mark_complete(request);
917 /* Remaining _unready_ requests will be nop'ed when submitted */
919 spin_unlock_irqrestore(&engine->active.lock, flags);
922 static void i9xx_submit_request(struct i915_request *request)
924 i915_request_submit(request);
925 wmb(); /* paranoid flush writes out of the WCB before mmio */
927 ENGINE_WRITE(request->engine, RING_TAIL,
928 intel_ring_set_tail(request->ring, request->tail));
931 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
933 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
934 GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
938 *cs++ = MI_STORE_DWORD_INDEX;
939 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
940 *cs++ = rq->fence.seqno;
942 *cs++ = MI_USER_INTERRUPT;
945 rq->tail = intel_ring_offset(rq, cs);
946 assert_ring_tail_valid(rq->ring, rq->tail);
951 #define GEN5_WA_STORES 8 /* must be at least 1! */
952 static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
956 GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
957 GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
961 BUILD_BUG_ON(GEN5_WA_STORES < 1);
962 for (i = 0; i < GEN5_WA_STORES; i++) {
963 *cs++ = MI_STORE_DWORD_INDEX;
964 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
965 *cs++ = rq->fence.seqno;
968 *cs++ = MI_USER_INTERRUPT;
970 rq->tail = intel_ring_offset(rq, cs);
971 assert_ring_tail_valid(rq->ring, rq->tail);
975 #undef GEN5_WA_STORES
978 gen5_irq_enable(struct intel_engine_cs *engine)
980 gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
984 gen5_irq_disable(struct intel_engine_cs *engine)
986 gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
990 i9xx_irq_enable(struct intel_engine_cs *engine)
992 engine->i915->irq_mask &= ~engine->irq_enable_mask;
993 intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
994 intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
998 i9xx_irq_disable(struct intel_engine_cs *engine)
1000 engine->i915->irq_mask |= engine->irq_enable_mask;
1001 intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
1005 i8xx_irq_enable(struct intel_engine_cs *engine)
1007 struct drm_i915_private *i915 = engine->i915;
1009 i915->irq_mask &= ~engine->irq_enable_mask;
1010 intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1011 ENGINE_POSTING_READ16(engine, RING_IMR);
1015 i8xx_irq_disable(struct intel_engine_cs *engine)
1017 struct drm_i915_private *i915 = engine->i915;
1019 i915->irq_mask |= engine->irq_enable_mask;
1020 intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1024 bsd_ring_flush(struct i915_request *rq, u32 mode)
1028 cs = intel_ring_begin(rq, 2);
1034 intel_ring_advance(rq, cs);
1039 gen6_irq_enable(struct intel_engine_cs *engine)
1041 ENGINE_WRITE(engine, RING_IMR,
1042 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1044 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
1045 ENGINE_POSTING_READ(engine, RING_IMR);
1047 gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
1051 gen6_irq_disable(struct intel_engine_cs *engine)
1053 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
1054 gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
1058 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1060 ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
1062 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
1063 ENGINE_POSTING_READ(engine, RING_IMR);
1065 gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
1069 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1071 ENGINE_WRITE(engine, RING_IMR, ~0);
1072 gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
1076 i965_emit_bb_start(struct i915_request *rq,
1077 u64 offset, u32 length,
1078 unsigned int dispatch_flags)
1082 cs = intel_ring_begin(rq, 2);
1086 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1087 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1089 intel_ring_advance(rq, cs);
1094 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1095 #define I830_BATCH_LIMIT SZ_256K
1096 #define I830_TLB_ENTRIES (2)
1097 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
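/*
 * Worked evaluation (illustration only): I830_TLB_ENTRIES * 4096 is just
 * 8 KiB, so I830_WA_SIZE resolves to I830_BATCH_LIMIT, i.e. 256 KiB. The
 * GEM_BUG_ON() on the gt->scratch size in i830_emit_bb_start() below is
 * what enforces that the scratch area really is that large.
 */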
1099 i830_emit_bb_start(struct i915_request *rq,
1100 u64 offset, u32 len,
1101 unsigned int dispatch_flags)
1103 u32 *cs, cs_offset =
1104 intel_gt_scratch_offset(rq->engine->gt,
1105 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1107 GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
1109 cs = intel_ring_begin(rq, 6);
1113 /* Evict the invalid PTE TLBs */
1114 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1115 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1116 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1120 intel_ring_advance(rq, cs);
1122 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1123 if (len > I830_BATCH_LIMIT)
1126 cs = intel_ring_begin(rq, 6 + 2);
1130 /* Blit the batch (which now has all relocs applied) to the
1131 * stable batch scratch bo area (so that the CS never
1132 * stumbles over its tlb invalidation bug) ...
1134 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
1135 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1136 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1143 intel_ring_advance(rq, cs);
1145 /* ... and execute it. */
1149 cs = intel_ring_begin(rq, 2);
1153 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1154 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1155 MI_BATCH_NON_SECURE);
1156 intel_ring_advance(rq, cs);
1162 i915_emit_bb_start(struct i915_request *rq,
1163 u64 offset, u32 len,
1164 unsigned int dispatch_flags)
1168 cs = intel_ring_begin(rq, 2);
1172 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1173 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1174 MI_BATCH_NON_SECURE);
1175 intel_ring_advance(rq, cs);
1180 static void __ring_context_fini(struct intel_context *ce)
1182 i915_vma_put(ce->state);
1185 static void ring_context_destroy(struct kref *ref)
1187 struct intel_context *ce = container_of(ref, typeof(*ce), ref);
1189 GEM_BUG_ON(intel_context_is_pinned(ce));
1192 __ring_context_fini(ce);
1194 intel_context_fini(ce);
1195 intel_context_free(ce);
1198 static struct i915_address_space *vm_alias(struct intel_context *ce)
1200 struct i915_address_space *vm;
1203 if (i915_is_ggtt(vm))
1204 vm = &i915_vm_to_ggtt(vm)->alias->vm;
1209 static int __context_pin_ppgtt(struct intel_context *ce)
1211 struct i915_address_space *vm;
1216 err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
1221 static void __context_unpin_ppgtt(struct intel_context *ce)
1223 struct i915_address_space *vm;
1227 gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
1230 static void ring_context_unpin(struct intel_context *ce)
1232 __context_unpin_ppgtt(ce);
1235 static struct i915_vma *
1236 alloc_context_vma(struct intel_engine_cs *engine)
1238 struct drm_i915_private *i915 = engine->i915;
1239 struct drm_i915_gem_object *obj;
1240 struct i915_vma *vma;
1243 obj = i915_gem_object_create_shmem(i915, engine->context_size);
1245 return ERR_CAST(obj);
1248 * Try to make the context utilize L3 as well as LLC.
1250 * On VLV we don't have L3 controls in the PTEs so we
1251 * shouldn't touch the cache level, especially as that
1252 * would make the object snooped which might have a
1253 * negative performance impact.
1255 * Snooping is required on non-llc platforms in execlist
1256 * mode, but since all GGTT accesses use PAT entry 0 we
1257 * get snooping anyway regardless of cache_level.
1259 * This is only applicable for Ivy Bridge devices since
1260 * later platforms don't have L3 control bits in the PTE.
1262 if (IS_IVYBRIDGE(i915))
1263 i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
1265 if (engine->default_state) {
1266 void *defaults, *vaddr;
1268 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1269 if (IS_ERR(vaddr)) {
1270 err = PTR_ERR(vaddr);
1274 defaults = i915_gem_object_pin_map(engine->default_state,
1276 if (IS_ERR(defaults)) {
1277 err = PTR_ERR(defaults);
1281 memcpy(vaddr, defaults, engine->context_size);
1282 i915_gem_object_unpin_map(engine->default_state);
1284 i915_gem_object_flush_map(obj);
1285 i915_gem_object_unpin_map(obj);
1288 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1297 i915_gem_object_unpin_map(obj);
1299 i915_gem_object_put(obj);
1300 return ERR_PTR(err);
1303 static int ring_context_alloc(struct intel_context *ce)
1305 struct intel_engine_cs *engine = ce->engine;
1307 /* One ringbuffer to rule them all */
1308 GEM_BUG_ON(!engine->legacy.ring);
1309 ce->ring = engine->legacy.ring;
1310 ce->timeline = intel_timeline_get(engine->legacy.timeline);
1312 GEM_BUG_ON(ce->state);
1313 if (engine->context_size) {
1314 struct i915_vma *vma;
1316 vma = alloc_context_vma(engine);
1318 return PTR_ERR(vma);
1326 static int ring_context_pin(struct intel_context *ce)
1330 err = intel_context_active_acquire(ce);
1334 err = __context_pin_ppgtt(ce);
1341 intel_context_active_release(ce);
1345 static void ring_context_reset(struct intel_context *ce)
1347 intel_ring_reset(ce->ring, 0);
1350 static const struct intel_context_ops ring_context_ops = {
1351 .alloc = ring_context_alloc,
1353 .pin = ring_context_pin,
1354 .unpin = ring_context_unpin,
1356 .enter = intel_context_enter_engine,
1357 .exit = intel_context_exit_engine,
1359 .reset = ring_context_reset,
1360 .destroy = ring_context_destroy,
1363 static int load_pd_dir(struct i915_request *rq, const struct i915_ppgtt *ppgtt)
1365 const struct intel_engine_cs * const engine = rq->engine;
1368 cs = intel_ring_begin(rq, 6);
1372 *cs++ = MI_LOAD_REGISTER_IMM(1);
1373 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
1374 *cs++ = PP_DIR_DCLV_2G;
1376 *cs++ = MI_LOAD_REGISTER_IMM(1);
1377 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1378 *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
1380 intel_ring_advance(rq, cs);
1385 static int flush_pd_dir(struct i915_request *rq)
1387 const struct intel_engine_cs * const engine = rq->engine;
1390 cs = intel_ring_begin(rq, 4);
1394 /* Stall until the page table load is complete */
1395 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1396 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1397 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
1398 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1401 intel_ring_advance(rq, cs);
1405 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1407 struct drm_i915_private *i915 = rq->i915;
1408 struct intel_engine_cs *engine = rq->engine;
1409 enum intel_engine_id id;
1410 const int num_engines =
1411 IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
1412 bool force_restore = false;
1416 flags |= MI_MM_SPACE_GTT;
1417 if (IS_HASWELL(i915))
1418 /* These flags are for resource streamer on HSW+ */
1419 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1421 /* We need to save the extended state for powersaving modes */
1422 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1425 if (IS_GEN(i915, 7))
1426 len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
1427 else if (IS_GEN(i915, 5))
1429 if (flags & MI_FORCE_RESTORE) {
1430 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1431 flags &= ~MI_FORCE_RESTORE;
1432 force_restore = true;
1436 cs = intel_ring_begin(rq, len);
1440 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1441 if (IS_GEN(i915, 7)) {
1442 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1444 struct intel_engine_cs *signaller;
1446 *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1447 for_each_engine(signaller, engine->gt, id) {
1448 if (signaller == engine)
1451 *cs++ = i915_mmio_reg_offset(
1452 RING_PSMI_CTL(signaller->mmio_base));
1453 *cs++ = _MASKED_BIT_ENABLE(
1454 GEN6_PSMI_SLEEP_MSG_DISABLE);
1457 } else if (IS_GEN(i915, 5)) {
1459 * This w/a is only listed for pre-production ilk a/b steppings,
1460 * but is also mentioned for programming the powerctx. To be
1461 * safe, just apply the workaround; we do not use SyncFlush so
1462 * this should never take effect and so be a no-op!
1464 *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
1467 if (force_restore) {
1469 * The HW doesn't handle being told to restore the current
1470 * context very well. Quite often it likes to go off and
1471 * sulk, especially when it is meant to be reloading PP_DIR.
1472 * A very simple fix to force the reload is to simply switch
1473 * away from the current context and back again.
1475 * Note that the kernel_context will contain random state
1476 * following the INHIBIT_RESTORE. We accept this since we
1477 * never use the kernel_context state; it is merely a
1478 * placeholder we use to flush other contexts.
1480 *cs++ = MI_SET_CONTEXT;
1481 *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
1487 *cs++ = MI_SET_CONTEXT;
1488 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1490 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1491 * WaMiSetContext_Hang:snb,ivb,vlv
1495 if (IS_GEN(i915, 7)) {
1497 struct intel_engine_cs *signaller;
1498 i915_reg_t last_reg = {}; /* keep gcc quiet */
1500 *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1501 for_each_engine(signaller, engine->gt, id) {
1502 if (signaller == engine)
1505 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1506 *cs++ = i915_mmio_reg_offset(last_reg);
1507 *cs++ = _MASKED_BIT_DISABLE(
1508 GEN6_PSMI_SLEEP_MSG_DISABLE);
1511 /* Insert a delay before the next switch! */
1512 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1513 *cs++ = i915_mmio_reg_offset(last_reg);
1514 *cs++ = intel_gt_scratch_offset(engine->gt,
1515 INTEL_GT_SCRATCH_FIELD_DEFAULT);
1518 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1519 } else if (IS_GEN(i915, 5)) {
1520 *cs++ = MI_SUSPEND_FLUSH;
1523 intel_ring_advance(rq, cs);
1528 static int remap_l3_slice(struct i915_request *rq, int slice)
1530 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1536 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1541 * Note: We do not worry about the concurrent register cacheline hang
1542 * here because no other code should access these registers other than
1543 * at initialization time.
1545 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1546 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1547 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1548 *cs++ = remap_info[i];
1551 intel_ring_advance(rq, cs);
1556 static int remap_l3(struct i915_request *rq)
1558 struct i915_gem_context *ctx = rq->gem_context;
1561 if (!ctx->remap_slice)
1564 for (i = 0; i < MAX_L3_SLICES; i++) {
1565 if (!(ctx->remap_slice & BIT(i)))
1568 err = remap_l3_slice(rq, i);
1573 ctx->remap_slice = 0;
1577 static int switch_context(struct i915_request *rq)
1579 struct intel_context *ce = rq->hw_context;
1580 struct i915_address_space *vm = vm_alias(ce);
1583 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1586 ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm));
1594 GEM_BUG_ON(rq->engine->id != RCS0);
1597 * The kernel context(s) is treated as pure scratch and is not
1598 * expected to retain any state (as we sacrifice it during
1599 * suspend and on resume it may be corrupted). This is ok,
1600 * as nothing actually executes using the kernel context; it
1601 * is purely used for flushing user contexts.
1604 if (i915_gem_context_is_kernel(rq->gem_context))
1605 hw_flags = MI_RESTORE_INHIBIT;
1607 ret = mi_set_context(rq, hw_flags);
1613 struct intel_engine_cs *engine = rq->engine;
1615 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1619 ret = flush_pd_dir(rq);
1624 * Not only do we need a full barrier (post-sync write) after
1625 * invalidating the TLBs, but we need to wait a little bit
1626 * longer. Whether this is merely delaying us, or the
1627 * subsequent flush is a key part of serialising with the
1628 * post-sync op, this extra pass appears vital before a
1631 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1635 ret = engine->emit_flush(rq, EMIT_FLUSH);
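/*
 * For reference, an outline (assumption, pieced together from the fragments
 * above) of the ordering switch_context() aims for: load the new page
 * directory, emit MI_SET_CONTEXT (with MI_RESTORE_INHIBIT for the kernel
 * context), then invalidate, flush the PD load, invalidate once more and
 * flush before any following batch, and finally replay the L3 remapping:
 *
 *	load_pd_dir(rq, ppgtt);
 *	mi_set_context(rq, hw_flags);
 *	engine->emit_flush(rq, EMIT_INVALIDATE);
 *	flush_pd_dir(rq);
 *	engine->emit_flush(rq, EMIT_INVALIDATE);
 *	engine->emit_flush(rq, EMIT_FLUSH);
 *	remap_l3(rq);
 */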
1647 static int ring_request_alloc(struct i915_request *request)
1651 GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
1652 GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
1655 * Flush enough space to reduce the likelihood of waiting after
1656 * we start building the request - in which case we will just
1657 * have to repeat work.
1659 request->reserved_space += LEGACY_REQUEST_SIZE;
1661 /* Unconditionally invalidate GPU caches and TLBs. */
1662 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1666 ret = switch_context(request);
1670 request->reserved_space -= LEGACY_REQUEST_SIZE;
1674 static void gen6_bsd_submit_request(struct i915_request *request)
1676 struct intel_uncore *uncore = request->engine->uncore;
1678 intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1680 /* Every tail move must follow the sequence below */
1682 /* Disable notification that the ring is IDLE. The GT
1683 * will then assume that it is busy and bring it out of rc6.
1685 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
1686 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1688 /* Clear the context id. Here be magic! */
1689 intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
1691 /* Wait for the ring not to be idle, i.e. for it to wake up. */
1692 if (__intel_wait_for_register_fw(uncore,
1693 GEN6_BSD_SLEEP_PSMI_CONTROL,
1694 GEN6_BSD_SLEEP_INDICATOR,
1697 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1699 /* Now that the ring is fully powered up, update the tail */
1700 i9xx_submit_request(request);
1702 /* Let the ring send IDLE messages to the GT again,
1703 * and so let it sleep to conserve power when idle.
1705 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
1706 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1708 intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1711 static int mi_flush_dw(struct i915_request *rq, u32 flags)
1715 cs = intel_ring_begin(rq, 4);
1722 * We always require a command barrier so that subsequent
1723 * commands, such as breadcrumb interrupts, are strictly ordered
1724 * wrt the contents of the write cache being flushed to memory
1725 * (and thus being coherent from the CPU).
1727 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1730 * Bspec vol 1c.3 - blitter engine command streamer:
1731 * "If ENABLED, all TLBs will be invalidated once the flush
1732 * operation is complete. This bit is only valid when the
1733 * Post-Sync Operation field is a value of 1h or 3h."
1738 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1742 intel_ring_advance(rq, cs);
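/*
 * Sketch (assumption) of the dwords mi_flush_dw() emits around the lines
 * shown above: the command word with the caller's flags folded in, the GTT
 * address of the scratch slot, a dummy value, and an MI_NOOP pad:
 *
 *	*cs++ = cmd;	(MI_FLUSH_DW | flags)
 *	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 *	*cs++ = 0;
 *	*cs++ = MI_NOOP;
 */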
1747 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
1749 return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
1752 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
1754 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
1758 hsw_emit_bb_start(struct i915_request *rq,
1759 u64 offset, u32 len,
1760 unsigned int dispatch_flags)
1764 cs = intel_ring_begin(rq, 2);
1768 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1769 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
1770 /* bit0-7 is the length on GEN6+ */
1772 intel_ring_advance(rq, cs);
1778 gen6_emit_bb_start(struct i915_request *rq,
1779 u64 offset, u32 len,
1780 unsigned int dispatch_flags)
1784 cs = intel_ring_begin(rq, 2);
1788 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1789 0 : MI_BATCH_NON_SECURE_I965);
1790 /* bit0-7 is the length on GEN6+ */
1792 intel_ring_advance(rq, cs);
1797 /* Blitter support (SandyBridge+) */
1799 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
1801 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
1804 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
1806 engine->submit_request = i9xx_submit_request;
1807 engine->cancel_requests = cancel_requests;
1809 engine->park = NULL;
1810 engine->unpark = NULL;
1813 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
1815 i9xx_set_default_submission(engine);
1816 engine->submit_request = gen6_bsd_submit_request;
1819 static void ring_destroy(struct intel_engine_cs *engine)
1821 struct drm_i915_private *dev_priv = engine->i915;
1823 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1824 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
1826 intel_engine_cleanup_common(engine);
1828 intel_ring_unpin(engine->legacy.ring);
1829 intel_ring_put(engine->legacy.ring);
1831 intel_timeline_unpin(engine->legacy.timeline);
1832 intel_timeline_put(engine->legacy.timeline);
1837 static void setup_irq(struct intel_engine_cs *engine)
1839 struct drm_i915_private *i915 = engine->i915;
1841 if (INTEL_GEN(i915) >= 6) {
1842 engine->irq_enable = gen6_irq_enable;
1843 engine->irq_disable = gen6_irq_disable;
1844 } else if (INTEL_GEN(i915) >= 5) {
1845 engine->irq_enable = gen5_irq_enable;
1846 engine->irq_disable = gen5_irq_disable;
1847 } else if (INTEL_GEN(i915) >= 3) {
1848 engine->irq_enable = i9xx_irq_enable;
1849 engine->irq_disable = i9xx_irq_disable;
1851 engine->irq_enable = i8xx_irq_enable;
1852 engine->irq_disable = i8xx_irq_disable;
1856 static void setup_common(struct intel_engine_cs *engine)
1858 struct drm_i915_private *i915 = engine->i915;
1860 /* gen8+ are only supported with execlists */
1861 GEM_BUG_ON(INTEL_GEN(i915) >= 8);
1865 engine->destroy = ring_destroy;
1867 engine->resume = xcs_resume;
1868 engine->reset.prepare = reset_prepare;
1869 engine->reset.reset = reset_ring;
1870 engine->reset.finish = reset_finish;
1872 engine->cops = &ring_context_ops;
1873 engine->request_alloc = ring_request_alloc;
1876 * Using a global execution timeline; the previous final breadcrumb is
1877 * equivalent to our next initial breadcrumb so we can elide
1878 * engine->emit_init_breadcrumb().
1880 engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
1881 if (IS_GEN(i915, 5))
1882 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
1884 engine->set_default_submission = i9xx_set_default_submission;
1886 if (INTEL_GEN(i915) >= 6)
1887 engine->emit_bb_start = gen6_emit_bb_start;
1888 else if (INTEL_GEN(i915) >= 4)
1889 engine->emit_bb_start = i965_emit_bb_start;
1890 else if (IS_I830(i915) || IS_I845G(i915))
1891 engine->emit_bb_start = i830_emit_bb_start;
1893 engine->emit_bb_start = i915_emit_bb_start;
1896 static void setup_rcs(struct intel_engine_cs *engine)
1898 struct drm_i915_private *i915 = engine->i915;
1900 if (HAS_L3_DPF(i915))
1901 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1903 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1905 if (INTEL_GEN(i915) >= 7) {
1906 engine->emit_flush = gen7_render_ring_flush;
1907 engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
1908 } else if (IS_GEN(i915, 6)) {
1909 engine->emit_flush = gen6_render_ring_flush;
1910 engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
1911 } else if (IS_GEN(i915, 5)) {
1912 engine->emit_flush = gen4_render_ring_flush;
1914 if (INTEL_GEN(i915) < 4)
1915 engine->emit_flush = gen2_render_ring_flush;
1917 engine->emit_flush = gen4_render_ring_flush;
1918 engine->irq_enable_mask = I915_USER_INTERRUPT;
1921 if (IS_HASWELL(i915))
1922 engine->emit_bb_start = hsw_emit_bb_start;
1924 engine->resume = rcs_resume;
1927 static void setup_vcs(struct intel_engine_cs *engine)
1929 struct drm_i915_private *i915 = engine->i915;
1931 if (INTEL_GEN(i915) >= 6) {
1932 /* gen6 bsd needs a special wa for tail updates */
1933 if (IS_GEN(i915, 6))
1934 engine->set_default_submission = gen6_bsd_set_default_submission;
1935 engine->emit_flush = gen6_bsd_ring_flush;
1936 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1938 if (IS_GEN(i915, 6))
1939 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
1941 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1943 engine->emit_flush = bsd_ring_flush;
1944 if (IS_GEN(i915, 5))
1945 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1947 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1951 static void setup_bcs(struct intel_engine_cs *engine)
1953 struct drm_i915_private *i915 = engine->i915;
1955 engine->emit_flush = gen6_ring_flush;
1956 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1958 if (IS_GEN(i915, 6))
1959 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
1961 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1964 static void setup_vecs(struct intel_engine_cs *engine)
1966 struct drm_i915_private *i915 = engine->i915;
1968 GEM_BUG_ON(INTEL_GEN(i915) < 7);
1970 engine->emit_flush = gen6_ring_flush;
1971 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
1972 engine->irq_enable = hsw_vebox_irq_enable;
1973 engine->irq_disable = hsw_vebox_irq_disable;
1975 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1978 int intel_ring_submission_setup(struct intel_engine_cs *engine)
1980 setup_common(engine);
1982 switch (engine->class) {
1986 case VIDEO_DECODE_CLASS:
1989 case COPY_ENGINE_CLASS:
1992 case VIDEO_ENHANCEMENT_CLASS:
1996 MISSING_CASE(engine->class);
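/*
 * Sketch (assumption) of the elided case bodies above: each engine class
 * simply invokes its per-class setup on top of setup_common(), e.g.
 *
 *	case RENDER_CLASS:
 *		setup_rcs(engine);
 *		break;
 *
 * with setup_vcs()/setup_bcs()/setup_vecs() for the other classes, and
 * MISSING_CASE() followed by an error return for anything unknown.
 */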
2003 int intel_ring_submission_init(struct intel_engine_cs *engine)
2005 struct intel_timeline *timeline;
2006 struct intel_ring *ring;
2009 timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
2010 if (IS_ERR(timeline)) {
2011 err = PTR_ERR(timeline);
2014 GEM_BUG_ON(timeline->has_initial_breadcrumb);
2016 err = intel_timeline_pin(timeline);
2020 ring = intel_engine_create_ring(engine, SZ_16K);
2022 err = PTR_ERR(ring);
2023 goto err_timeline_unpin;
2026 err = intel_ring_pin(ring);
2030 GEM_BUG_ON(engine->legacy.ring);
2031 engine->legacy.ring = ring;
2032 engine->legacy.timeline = timeline;
2034 err = intel_engine_init_common(engine);
2036 goto err_ring_unpin;
2038 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
2043 intel_ring_unpin(ring);
2045 intel_ring_put(ring);
2047 intel_timeline_unpin(timeline);
2049 intel_timeline_put(timeline);
2051 intel_engine_cleanup_common(engine);
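/*
 * Sketch (assumption) of the labels structuring the unwind above: each
 * failure point jumps to the label that releases everything acquired so
 * far, in reverse order of setup:
 *
 *	return 0;
 *
 * err_ring_unpin:
 *	intel_ring_unpin(ring);
 * err_ring:
 *	intel_ring_put(ring);
 * err_timeline_unpin:
 *	intel_timeline_unpin(timeline);
 * err_timeline:
 *	intel_timeline_put(timeline);
 * err:
 *	intel_engine_cleanup_common(engine);
 *	return err;
 */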