/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include <linux/log2.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_workarounds.h"
/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
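/*
 * The estimate is consumed via rq->reserved_space: ring_request_alloc()
 * sets this much space aside up front so that finalising the request
 * (the breadcrumb and flushes emitted at i915_request_add() time) can
 * never fail for lack of ring space. See the comment in intel_ring_begin().
 */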
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
	unsigned int space;

	space = __intel_ring_space(ring->head, ring->emit, ring->size);

	ring->space = space;
	return space;
}
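/*
 * A sketch of the space accounting: __intel_ring_space() (defined in
 * intel_ringbuffer.h) computes the free bytes modulo the ring size,
 * keeping one cacheline spare between emit and head, roughly:
 *
 *	space = (head - emit - CACHELINE_BYTES) & (size - 1);
 */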
static int
gen2_render_ring_flush(struct i915_request *rq, u32 mode)
{
	unsigned int num_store_dw;
	u32 cmd, *cs;

	cmd = MI_FLUSH;
	num_store_dw = 0;
	if (mode & EMIT_INVALIDATE)
		cmd |= MI_READ_FLUSH;
	if (mode & EMIT_FLUSH)
		num_store_dw = 4;

	cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;
	while (num_store_dw--) {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_scratch_offset(rq->i915);
		*cs++ = 0;
	}
	*cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;

	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen4_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;
	int i;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH;
	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_EXE_FLUSH;
		if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
			cmd |= MI_INVALIDATE_ISP;
	}

	i = 2;
	if (mode & EMIT_INVALIDATE)
		i += 20;

	cs = intel_ring_begin(rq, i);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = cmd;

	/*
	 * A random delay to let the CS invalidate take effect? Without this
	 * delay, the GPU relocation path fails as the CS does not see
	 * the updated contents. Just as important, if we apply the flushes
	 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
	 * write and before the invalidate on the next batch), the relocations
	 * still fail. This implies that it is a delay following invalidation
	 * that is required to reset the caches as opposed to a delay to
	 * ensure the memory is written.
	 */
	if (mode & EMIT_INVALIDATE) {
		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;

		for (i = 0; i < 12; i++)
			*cs++ = MI_FLUSH;

		*cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
		*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
		*cs++ = 0;
		*cs++ = 0;
	}

	*cs++ = cmd;

	intel_ring_advance(rq, cs);

	return 0;
}
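/*
 * Note the dword accounting above: the EMIT_INVALIDATE path adds 20 dwords
 * on top of the basic 2 (cmd + cmd) - two 4-dword PIPE_CONTROL QW writes
 * bracketing the 12 MI_FLUSH delay ops.
 */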
/*
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0; /* low dword */
	*cs++ = 0; /* high dword */
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(5);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen6_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = gen6_emit_post_sync_nonzero_flush(rq);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/*
		 * Ensure that any following seqno writes only happen
		 * when the render cache is indeed flushed.
		 */
		flags |= PIPE_CONTROL_CS_STALL;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
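/*
 * The gen6 RCS breadcrumb below has to embed the same post-sync-nonzero
 * workaround sequence as above before the flushing PIPE_CONTROL that
 * publishes rq->fence.seqno and raises the user interrupt.
 */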
static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	/* First we do the gen6_emit_post_sync_nonzero_flush w/a */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE;
	*cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = 0;

	/* Finally we can flush and with it emit the breadcrumb */
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
	*cs++ = 0;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static int
gen7_render_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
	u32 *cs, flags = 0;

	/*
	 * Ensure that any following seqno writes only happen when the render
	 * cache is indeed flushed.
	 *
	 * Workaround: 4th PIPE_CONTROL command (except the ones with only
	 * read-cache invalidate bits set) must have the CS_STALL bit set. We
	 * don't try to be clever and just set it unconditionally.
	 */
	flags |= PIPE_CONTROL_CS_STALL;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}
	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
		/*
		 * TLB invalidate requires a post-sync write.
		 */
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

		flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

		/* Workaround: we must issue a pipe_control with CS-stall bit
		 * set before a pipe_control command that has the state cache
		 * invalidate bit set.
		 */
		gen7_render_ring_cs_stall_wa(rq);
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = flags;
	*cs++ = scratch_addr;
	*cs++ = 0;
	intel_ring_advance(rq, cs);

	return 0;
}
static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		 PIPE_CONTROL_DC_FLUSH_ENABLE |
		 PIPE_CONTROL_FLUSH_ENABLE |
		 PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_GLOBAL_GTT_IVB |
		 PIPE_CONTROL_CS_STALL);
	*cs++ = rq->timeline->hwsp_offset;
	*cs++ = rq->fence.seqno;

	*cs++ = GFX_OP_PIPE_CONTROL(4);
	*cs++ = (PIPE_CONTROL_QW_WRITE |
		 PIPE_CONTROL_STORE_DATA_INDEX |
		 PIPE_CONTROL_GLOBAL_GTT_IVB);
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#define GEN7_XCS_WA 32
static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	for (i = 0; i < GEN7_XCS_WA; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_FLUSH_DW;
	*cs++ = 0;
	*cs++ = 0;

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN7_XCS_WA
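/*
 * The GEN7_XCS_WA loop above floods the seqno slot in the HWSP with
 * repeated MI_STORE_DWORD_INDEX writes after the MI_FLUSH_DW; the extra
 * stores, followed by a final dummy flush, appear to be what makes the
 * seqno write reliably visible before MI_USER_INTERRUPT fires.
 */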
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(dev_priv) >= 4)
		addr |= (phys >> 28) & 0xf0;

	I915_WRITE(HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}
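/*
 * Older platforms (HWS_NEEDS_PHYSICAL in init_ring_common()) point the
 * hardware status page at a physical address via HWS_PGA, as above;
 * everything else uses a GGTT offset programmed through set_hwsp() below.
 */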
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(dev_priv, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			/* fallthrough */
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(dev_priv, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	I915_WRITE(hwsp, offset);
	POSTING_READ(hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	i915_reg_t instpm = RING_INSTPM(engine->mmio_base);

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);

	I915_WRITE(instpm,
		   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
				      INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(&dev_priv->uncore,
				    instpm, INSTPM_SYNC_FLUSH, 0,
				    1000))
		DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			  engine->name);
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(&dev_priv->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			DRM_ERROR("%s : timed out trying to stop ring\n",
				  engine->name);

			/* Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
				return false;
		}
	}

	I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));

	I915_WRITE_HEAD(engine, 0);
	I915_WRITE_TAIL(engine, 0);

	/* The ring must be empty before it is disabled */
	I915_WRITE_CTL(engine, 0);

	return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->buffer;
	int ret = 0;

	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		DRM_DEBUG_DRIVER("%s head not reset to zero "
				 "ctl %08x head %08x tail %08x start %08x\n",
				 engine->name,
				 I915_READ_CTL(engine),
				 I915_READ_HEAD(engine),
				 I915_READ_TAIL(engine),
				 I915_READ_START(engine));

		if (!stop_ring(engine)) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  engine->name,
				  I915_READ_CTL(engine),
				  I915_READ_HEAD(engine),
				  I915_READ_TAIL(engine),
				  I915_READ_START(engine));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	I915_READ_HEAD(engine);

	/* Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (I915_READ_HEAD(engine))
		DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
				 engine->name, I915_READ_HEAD(engine));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	/* First wake the ring up to an empty/idle ring */
	I915_WRITE_HEAD(engine, ring->head);
	I915_WRITE_TAIL(engine, ring->head);
	(void)I915_READ_TAIL(engine);

	I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(&dev_priv->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			  engine->name,
			  I915_READ_CTL(engine),
			  I915_READ_CTL(engine) & RING_VALID,
			  I915_READ_HEAD(engine), ring->head,
			  I915_READ_TAIL(engine), ring->tail,
			  I915_READ_START(engine),
			  i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		I915_WRITE_TAIL(engine, ring->tail);
		(void)I915_READ_TAIL(engine);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_queue_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}
static void reset_prepare(struct intel_engine_cs *engine)
{
	intel_engine_stop_cs(engine);
}
static void reset_ring(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_timeline *tl = &engine->timeline;
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&tl->lock, flags);
	list_for_each_entry(pos, &tl->requests, link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * number of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		i915_reset_request(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->buffer);
		head = rq->head;
	} else {
		head = engine->buffer->tail;
	}
	engine->buffer->head = intel_ring_wrap(engine->buffer, head);

	spin_unlock_irqrestore(&tl->lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
{
}
static int intel_rcs_ctx_init(struct i915_request *rq)
{
	int ret;

	ret = intel_engine_emit_ctx_wa(rq);
	if (ret != 0)
		return ret;

	ret = i915_gem_render_state_emit(rq);
	if (ret)
		return ret;

	return 0;
}
static int init_render_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret = init_ring_common(engine);
	if (ret)
		return ret;

	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
	if (IS_GEN_RANGE(dev_priv, 4, 6))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

	/* We need to disable the AsyncFlip performance optimisations in order
	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
	 * programmed to '1' on all products.
	 *
	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
	 */
	if (IS_GEN_RANGE(dev_priv, 6, 7))
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

	/* Required for the hardware to program scanline values for waiting */
	/* WaEnableFlushTlbInvalidationMode:snb */
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(GFX_MODE,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));

	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
	if (IS_GEN(dev_priv, 7))
		I915_WRITE(GFX_MODE_GEN7,
			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

	if (IS_GEN(dev_priv, 6)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (IS_GEN_RANGE(dev_priv, 6, 7))
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	if (INTEL_GEN(dev_priv) >= 6)
		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);

	return 0;
}
static void cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->timeline.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->timeline.requests, link) {
		if (!i915_request_signaled(request))
			dma_fence_set_error(&request->fence, -EIO);

		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
{
	struct drm_i915_private *dev_priv = request->i915;

	i915_request_submit(request);

	I915_WRITE_TAIL(request->engine,
			intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_SEQNO_ADDR;
	*cs++ = rq->fence.seqno;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	*cs++ = MI_USER_INTERRUPT;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#define GEN5_WA_STORES 8 /* must be at least 1! */
static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
	int i;

	GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma);
	GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);

	*cs++ = MI_FLUSH;

	*cs++ = MI_STORE_DWORD_INDEX;
	*cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
	*cs++ = intel_engine_next_hangcheck_seqno(rq->engine);

	BUILD_BUG_ON(GEN5_WA_STORES < 1);
	for (i = 0; i < GEN5_WA_STORES; i++) {
		*cs++ = MI_STORE_DWORD_INDEX;
		*cs++ = I915_GEM_HWS_SEQNO_ADDR;
		*cs++ = rq->fence.seqno;
	}

	*cs++ = MI_USER_INTERRUPT;
	*cs++ = MI_NOOP;

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return cs;
}
#undef GEN5_WA_STORES
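/*
 * On gen5 a single MI_STORE_DWORD_INDEX appears not always sufficient
 * for the seqno write to land before the interrupt is raised, hence the
 * GEN5_WA_STORES repetitions above; only the final store needs to stick,
 * which is why the count merely "must be at least 1".
 */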
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}

static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
	POSTING_READ_FW(RING_IMR(engine->mmio_base));
}

static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE(IMR, dev_priv->irq_mask);
}

static void
i8xx_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask &= ~engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
	POSTING_READ16(RING_IMR(engine->mmio_base));
}

static void
i8xx_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	dev_priv->irq_mask |= engine->irq_enable_mask;
	I915_WRITE16(IMR, dev_priv->irq_mask);
}
static int
bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);
	return 0;
}
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine,
		       ~(engine->irq_enable_mask |
			 engine->irq_keep_mask));

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);

	/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
	POSTING_READ_FW(RING_IMR(engine->mmio_base));

	gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}

static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	I915_WRITE_IMR(engine, ~0);
	gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
}
static int
i965_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 length,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT SZ_256K
#define I830_TLB_ENTRIES (2)
#define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
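/*
 * The i830 CS has a TLB invalidation bug when fetching a batch from its
 * original address. The workaround below first evicts the stale PTE TLB
 * entries with a dummy COLOR_BLT into the scratch page, and for
 * non-pinned batches blits the whole batch into the stable scratch area
 * and executes that copy instead.
 */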
static int
i830_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs, cs_offset = i915_scratch_offset(rq->i915);

	GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Evict the invalid PTE TLBs */
	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
	*cs++ = cs_offset;
	*cs++ = 0xdeadbeef;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
		if (len > I830_BATCH_LIMIT)
			return -ENOSPC;

		cs = intel_ring_begin(rq, 6 + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Blit the batch (which has now all relocs applied) to the
		 * stable batch scratch bo area (so that the CS never
		 * stumbles over its tlb invalidation bug) ...
		 */
		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
		*cs++ = cs_offset;
		*cs++ = 4096;
		*cs++ = offset;

		*cs++ = MI_FLUSH;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);

		/* ... and execute it. */
		offset = cs_offset;
	}

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}
static int
i915_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
		MI_BATCH_NON_SECURE);
	intel_ring_advance(rq, cs);

	return 0;
}
int intel_ring_pin(struct intel_ring *ring)
{
	struct i915_vma *vma = ring->vma;
	enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
	unsigned int flags;
	void *addr;
	int ret;

	GEM_BUG_ON(ring->vaddr);

	ret = i915_timeline_pin(ring->timeline);
	if (ret)
		return ret;

	flags = PIN_GLOBAL;

	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
	flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);

	if (vma->obj->stolen)
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	ret = i915_vma_pin(vma, 0, 0, flags);
	if (unlikely(ret))
		goto unpin_timeline;

	if (i915_vma_is_map_and_fenceable(vma))
		addr = (void __force *)i915_vma_pin_iomap(vma);
	else
		addr = i915_gem_object_pin_map(vma->obj, map);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto unpin_ring;
	}

	vma->obj->pin_global++;

	ring->vaddr = addr;
	return 0;

unpin_ring:
	i915_vma_unpin(vma);
unpin_timeline:
	i915_timeline_unpin(ring->timeline);
	return ret;
}
void intel_ring_reset(struct intel_ring *ring, u32 tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	ring->tail = tail;
	ring->head = tail;
	ring->emit = tail;
	intel_ring_update_space(ring);
}
void intel_ring_unpin(struct intel_ring *ring)
{
	GEM_BUG_ON(!ring->vma);
	GEM_BUG_ON(!ring->vaddr);

	/* Discard any unused bytes beyond that submitted to hw. */
	intel_ring_reset(ring, ring->tail);

	if (i915_vma_is_map_and_fenceable(ring->vma))
		i915_vma_unpin_iomap(ring->vma);
	else
		i915_gem_object_unpin_map(ring->vma->obj);
	ring->vaddr = NULL;

	ring->vma->obj->pin_global--;
	i915_vma_unpin(ring->vma);

	i915_timeline_unpin(ring->timeline);
}
static struct i915_vma *
intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
{
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	obj = i915_gem_object_create_stolen(dev_priv, size);
	if (!obj)
		obj = i915_gem_object_create_internal(dev_priv, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
	 * if supported by the platform's GGTT.
	 */
	if (vm->has_read_only)
		i915_gem_object_set_readonly(obj);

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size)
{
	struct intel_ring *ring;
	struct i915_vma *vma;

	GEM_BUG_ON(!is_power_of_2(size));
	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
	GEM_BUG_ON(timeline == &engine->timeline);
	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return ERR_PTR(-ENOMEM);

	kref_init(&ring->ref);
	INIT_LIST_HEAD(&ring->request_list);
	ring->timeline = i915_timeline_get(timeline);

	ring->size = size;
	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = size;
	if (IS_I830(engine->i915) || IS_I845G(engine->i915))
		ring->effective_size -= 2 * CACHELINE_BYTES;

	intel_ring_update_space(ring);

	vma = intel_ring_create_vma(engine->i915, size);
	if (IS_ERR(vma)) {
		kfree(ring);
		return ERR_CAST(vma);
	}
	ring->vma = vma;

	return ring;
}
void intel_ring_free(struct kref *ref)
{
	struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
	struct drm_i915_gem_object *obj = ring->vma->obj;

	i915_vma_close(ring->vma);
	__i915_gem_object_release_unless_active(obj);

	i915_timeline_put(ring->timeline);
	kfree(ring);
}
static void __ring_context_fini(struct intel_context *ce)
{
	GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
	i915_gem_object_put(ce->state->obj);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_free(ce);
}
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;
	int err = 0;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		err = gen6_ppgtt_pin(ppgtt);

	return err;
}

static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
{
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
	if (ppgtt)
		gen6_ppgtt_unpin(ppgtt);
}
static int __context_pin(struct intel_context *ce)
{
	struct i915_vma *vma;
	int err;

	vma = ce->state;
	if (!vma)
		return 0;

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	vma->obj->pin_global++;
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin(struct intel_context *ce)
{
	struct i915_vma *vma;

	vma = ce->state;
	if (!vma)
		return;

	vma->obj->pin_global--;
	i915_vma_unpin(vma);
}
static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce->gem_context);
	__context_unpin(ce);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *defaults, *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		defaults = i915_gem_object_pin_map(engine->default_state,
						   I915_MAP_WB);
		if (IS_ERR(defaults)) {
			err = PTR_ERR(defaults);
			goto err_map;
		}

		memcpy(vaddr, defaults, engine->context_size);
		i915_gem_object_unpin_map(engine->default_state);

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_map:
	i915_gem_object_unpin_map(obj);
err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static int ring_context_pin(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;
	int err;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->buffer);
	ce->ring = engine->buffer;

	if (!ce->state && engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	err = __context_pin(ce);
	if (err)
		return err;

	err = __context_pin_ppgtt(ce->gem_context);
	if (err)
		goto err_unpin;

	return 0;

err_unpin:
	__context_unpin(ce);
	return err;
}
static const struct intel_context_ops ring_context_ops = {
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.destroy = ring_context_destroy,
};
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
	struct i915_timeline *timeline;
	struct intel_ring *ring;
	int err;

	err = intel_engine_setup_common(engine);
	if (err)
		return err;

	timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
	i915_timeline_put(timeline);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->buffer);
	engine->buffer = ring;

	err = intel_engine_init_common(engine);
	if (err)
		goto err_unpin;

	GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);

	return 0;

err_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err:
	intel_engine_cleanup_common(engine);
	return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	WARN_ON(INTEL_GEN(dev_priv) > 2 &&
		(I915_READ_MODE(engine) & MODE_IDLE) == 0);

	intel_ring_unpin(engine->buffer);
	intel_ring_put(engine->buffer);

	if (engine->cleanup)
		engine->cleanup(engine);

	intel_engine_cleanup_common(engine);

	dev_priv->engine[engine->id] = NULL;
	kfree(engine);
}
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/* Restart from the beginning of the rings for convenience */
	for_each_engine(engine, dev_priv, id)
		intel_ring_reset(engine->buffer, 0);
}
static int load_pd_dir(struct i915_request *rq,
		       const struct i915_hw_ppgtt *ppgtt)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = ppgtt->pd.base.ggtt_offset << 10;

	intel_ring_advance(rq, cs);

	return 0;
}
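/*
 * load_pd_dir() above merely queues LRI writes of the new page-directory
 * registers; flush_pd_dir() below then reads RING_PP_DIR_BASE back into
 * the scratch page with an SRM, so the CS stalls until the load has
 * actually completed.
 */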
static int flush_pd_dir(struct i915_request *rq)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* Stall until the page table load is complete */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = i915_scratch_offset(rq->i915);
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}
static inline int mi_set_context(struct i915_request *rq, u32 flags)
{
	struct drm_i915_private *i915 = rq->i915;
	struct intel_engine_cs *engine = rq->engine;
	enum intel_engine_id id;
	const int num_engines =
		IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(i915))
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, i915, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_scratch_offset(rq->i915);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
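/*
 * Note the dword budget computed in "len" above: 4 for the core
 * NOOP / MI_SET_CONTEXT / context-address / NOOP sequence, 2 more on
 * gen7 for the MI_ARB_ON_OFF bracket, plus the per-engine PSMI toggles
 * (4 dwords each) with 2 LRI headers and a 4-dword SRM delay when other
 * engines are present, and 2 extra for the force_restore double switch.
 */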
static int remap_l3(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_gem_context *ctx = rq->gem_context;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
	unsigned int unwind_mm = 0;
	u32 hw_flags = 0;
	int ret, i;

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(HAS_EXECLISTS(rq->i915));

	if (ppgtt) {
		int loops;

		/*
		 * Baytrail takes a little more convincing that it really needs
		 * to reload the PD between contexts. It is not just a little
		 * longer, as adding more stalls after the load_pd_dir (i.e.
		 * adding a long loop around flush_pd_dir) is not as effective
		 * as reloading the PD umpteen times. 32 is derived from
		 * experimentation (gem_exec_parallel/fds) and has no good
		 * explanation.
		 */
		loops = 1;
		if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
			loops = 32;

		do {
			ret = load_pd_dir(rq, ppgtt);
			if (ret)
				goto err;
		} while (--loops);

		if (ppgtt->pd_dirty_engines & engine->mask) {
			unwind_mm = engine->mask;
			ppgtt->pd_dirty_engines &= ~unwind_mm;
			hw_flags = MI_FORCE_RESTORE;
		}
	}

	if (rq->hw_context->state) {
		GEM_BUG_ON(engine->id != RCS0);

		/*
		 * The kernel context(s) is treated as pure scratch and is not
		 * expected to retain any state (as we sacrifice it during
		 * suspend and on resume it may be corrupted). This is ok,
		 * as nothing actually executes using the kernel context; it
		 * is purely used for flushing user contexts.
		 */
		if (i915_gem_context_is_kernel(ctx))
			hw_flags = MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, hw_flags);
		if (ret)
			goto err_mm;
	}

	if (ppgtt) {
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = flush_pd_dir(rq);
		if (ret)
			goto err_mm;

		/*
		 * Not only do we need a full barrier (post-sync write) after
		 * invalidating the TLBs, but we need to wait a little bit
		 * longer. Whether this is merely delaying us, or the
		 * subsequent flush is a key part of serialising with the
		 * post-sync op, this extra pass appears vital before a
		 * reload of the page tables.
		 */
		ret = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (ret)
			goto err_mm;

		ret = engine->emit_flush(rq, EMIT_FLUSH);
		if (ret)
			goto err_mm;
	}

	if (ctx->remap_slice) {
		for (i = 0; i < MAX_L3_SLICES; i++) {
			if (!(ctx->remap_slice & BIT(i)))
				continue;

			ret = remap_l3(rq, i);
			if (ret)
				goto err_mm;
		}

		ctx->remap_slice = 0;
	}

	return 0;

err_mm:
	if (unwind_mm)
		ppgtt->pd_dirty_engines |= unwind_mm;
err:
	return ret;
}
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
	GEM_BUG_ON(request->timeline->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	ret = switch_context(request);
	if (ret)
		return ret;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
{
	struct i915_request *target;
	long timeout;

	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);

	if (intel_ring_update_space(ring) >= bytes)
		return 0;

	GEM_BUG_ON(list_empty(&ring->request_list));
	list_for_each_entry(target, &ring->request_list, ring_link) {
		/* Would completion of this request free enough space? */
		if (bytes <= __intel_ring_space(target->postfix,
						ring->emit, ring->size))
			break;
	}

	if (WARN_ON(&target->ring_link == &ring->request_list))
		return -ENOSPC;

	timeout = i915_request_wait(target,
				    I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
				    MAX_SCHEDULE_TIMEOUT);
	if (timeout < 0)
		return timeout;

	i915_request_retire_upto(target);

	intel_ring_update_space(ring);
	GEM_BUG_ON(ring->space < bytes);
	return 0;
}
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
	struct intel_ring *ring = rq->ring;
	const unsigned int remain_usable = ring->effective_size - ring->emit;
	const unsigned int bytes = num_dwords * sizeof(u32);
	unsigned int need_wrap = 0;
	unsigned int total_bytes;
	u32 *cs;

	/* Packets must be qword aligned. */
	GEM_BUG_ON(num_dwords & 1);

	total_bytes = bytes + rq->reserved_space;
	GEM_BUG_ON(total_bytes > ring->effective_size);

	if (unlikely(total_bytes > remain_usable)) {
		const int remain_actual = ring->size - ring->emit;

		if (bytes > remain_usable) {
			/*
			 * Not enough space for the basic request. So need to
			 * flush out the remainder and then wait for
			 * base + reserved.
			 */
			total_bytes += remain_actual;
			need_wrap = remain_actual | 1;
		} else {
			/*
			 * The base request will fit but the reserved space
			 * falls off the end. So we don't need an immediate
			 * wrap and only need to effectively wait for the
			 * reserved size from the start of ringbuffer.
			 */
			total_bytes = rq->reserved_space + remain_actual;
		}
	}

	if (unlikely(total_bytes > ring->space)) {
		int ret;

		/*
		 * Space is reserved in the ringbuffer for finalising the
		 * request, as that cannot be allowed to fail. During request
		 * finalisation, reserved_space is set to 0 to stop the
		 * overallocation and the assumption is that then we never need
		 * to wait (which has the risk of failing with EINTR).
		 *
		 * See also i915_request_alloc() and i915_request_add().
		 */
		GEM_BUG_ON(!rq->reserved_space);

		ret = wait_for_space(ring, total_bytes);
		if (unlikely(ret))
			return ERR_PTR(ret);
	}

	if (unlikely(need_wrap)) {
		need_wrap &= ~1;
		GEM_BUG_ON(need_wrap > ring->space);
		GEM_BUG_ON(ring->emit + need_wrap > ring->size);
		GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));

		/* Fill the tail with MI_NOOP */
		memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
		ring->space -= need_wrap;
		ring->emit = 0;
	}

	GEM_BUG_ON(ring->emit > ring->size - bytes);
	GEM_BUG_ON(ring->space < bytes);
	cs = ring->vaddr + ring->emit;
	GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
	ring->emit += bytes;
	ring->space -= bytes;

	return cs;
}
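/*
 * Canonical usage of the begin/emit/advance pattern, as seen throughout
 * this file:
 *
 *	cs = intel_ring_begin(rq, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = ...;	(exactly num_dwords emitted, qword aligned)
 *
 *	intel_ring_advance(rq, cs);
 */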
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct i915_request *rq)
{
	int num_dwords;
	void *cs;

	num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
	if (num_dwords == 0)
		return 0;

	num_dwords = CACHELINE_DWORDS - num_dwords;
	GEM_BUG_ON(num_dwords & 1);

	cs = intel_ring_begin(rq, num_dwords);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
	intel_ring_advance(rq, cs);

	GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
	return 0;
}
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = &request->i915->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	cmd |= flags;

	*cs++ = cmd;
	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}
static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
{
	return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
}

static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
}
static int
hsw_emit_bb_start(struct i915_request *rq,
		  u64 offset, u32 len,
		  unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}

static int
gen6_emit_bb_start(struct i915_request *rq,
		   u64 offset, u32 len,
		   unsigned int dispatch_flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
		0 : MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	*cs++ = offset;
	intel_ring_advance(rq, cs);

	return 0;
}
/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct i915_request *rq, u32 mode)
{
	return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
				struct intel_engine_cs *engine)
{
	if (INTEL_GEN(dev_priv) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(dev_priv) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(dev_priv) >= 3) {
		engine->irq_enable = i9xx_irq_enable;
		engine->irq_disable = i9xx_irq_disable;
	} else {
		engine->irq_enable = i8xx_irq_enable;
		engine->irq_disable = i8xx_irq_disable;
	}
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
	engine->cancel_requests = cancel_requests;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
				      struct intel_engine_cs *engine)
{
	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);

	intel_ring_init_irq(dev_priv, engine);

	engine->init_hw = init_ring_common;
	engine->reset.prepare = reset_prepare;
	engine->reset.reset = reset_ring;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
	if (IS_GEN(dev_priv, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(dev_priv) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(dev_priv) >= 4)
		engine->emit_bb_start = i965_emit_bb_start;
	else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = i915_emit_bb_start;
}
int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	int ret;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (HAS_L3_DPF(dev_priv))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(dev_priv) >= 7) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen7_render_ring_flush;
		engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
	} else if (IS_GEN(dev_priv, 6)) {
		engine->init_context = intel_rcs_ctx_init;
		engine->emit_flush = gen6_render_ring_flush;
		engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
	} else if (IS_GEN(dev_priv, 5)) {
		engine->emit_flush = gen4_render_ring_flush;
	} else {
		if (INTEL_GEN(dev_priv) < 4)
			engine->emit_flush = gen2_render_ring_flush;
		else
			engine->emit_flush = gen4_render_ring_flush;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(dev_priv))
		engine->emit_bb_start = hsw_emit_bb_start;

	engine->init_hw = init_render_ring;

	ret = intel_init_ring_buffer(engine);
	if (ret)
		return ret;

	return 0;
}
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	intel_ring_default_vfuncs(dev_priv, engine);

	if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(dev_priv, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_bsd_ring_flush;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(dev_priv, 6))
			engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
		else
			engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
	} else {
		engine->emit_flush = bsd_ring_flush;
		if (IS_GEN(dev_priv, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}

	return intel_init_ring_buffer(engine);
}
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(dev_priv, 6))
		engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
	else
		engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;

	return intel_init_ring_buffer(engine);
}
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);

	intel_ring_default_vfuncs(dev_priv, engine);

	engine->emit_flush = gen6_ring_flush;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_vebox_irq_enable;
	engine->irq_disable = hsw_vebox_irq_disable;

	engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;

	return intel_init_ring_buffer(engine);
}