/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"

#include "i915_reg.h"
#include "i915_pmu.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_gpu_commands.h"

struct drm_printer;
struct i915_sched_attr;

#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))

struct intel_hw_status_page {
	struct i915_vma *vma;
	u32 *page_addr;
	u32 ggtt_offset;
};

#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)

/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};

static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}

#define I915_MAX_SLICES	3
#define I915_MAX_SUBSLICES 8

#define instdone_slice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)

#define instdone_subslice_mask(dev_priv__) \
	(INTEL_GEN(dev_priv__) == 7 ? \
	 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])

#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
	for ((slice__) = 0, (subslice__) = 0; \
	     (slice__) < I915_MAX_SLICES; \
	     (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
	       (slice__) += ((subslice__) == 0)) \
		for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
			    (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
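
/*
 * Illustrative sketch (not part of the driver; the real consumer is
 * intel_engine_get_instdone()): the iterator above visits only the
 * slice/subslice pairs actually present on this device, e.g.
 *
 *	int slice, subslice;
 *
 *	for_each_instdone_slice_subslice(dev_priv, slice, subslice)
 *		instdone->sampler[slice][subslice] =
 *			read_subslice_reg(...); // hypothetical helper
 */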

struct intel_instdone {
	u32 instdone;
	/* The following exist only in the RCS engine */
	u32 slice_common;
	u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};

struct intel_engine_hangcheck {
	u64 acthd;
	u32 seqno;
	enum intel_engine_hangcheck_action action;
	unsigned long action_timestamp;
	int deadlock;
	struct intel_instdone instdone;
	struct i915_request *active_request;
	bool stalled:1;
	bool wedged:1;
};

struct intel_ring {
	struct i915_vma *vma;
	void *vaddr;

	struct i915_timeline *timeline;
	struct list_head request_list;
	struct list_head active_link;

	u32 head;
	u32 tail;
	u32 emit;

	u32 space;
	u32 size;
	u32 effective_size;
};

struct i915_gem_context;
struct drm_i915_reg_table;

/*
 * we use a single page to load ctx workarounds so all of these
 * values are referred in terms of dwords
 *
 * struct i915_wa_ctx_bb:
 *  offset: specifies the batch starting position, which is also helpful
 *    if we want to have multiple batches at different offsets based on
 *    some criteria. It is not a requirement at the moment but provides
 *    an option for future use.
 *  size: size of the batch in DWORDS
 */
struct i915_ctx_workarounds {
	struct i915_wa_ctx_bb {
		u32 offset;
		u32 size;
	} indirect_ctx, per_ctx;
	struct i915_vma *vma;
};

#define I915_MAX_VCS	4
#define I915_MAX_VECS	2

/*
 * Engine IDs definitions.
 * Keep instances of the same type engine together.
 */
enum intel_engine_id {
	RCS = 0,
	BCS,
	VCS,
	VCS2,
	VCS3,
	VCS4,
#define _VCS(n) (VCS + (n))
	VECS,
	VECS2
#define _VECS(n) (VECS + (n))
};

struct i915_priolist {
	struct rb_node node;
	struct list_head requests;
	int priority;
};

struct st_preempt_hang {
	struct completion completion;
	bool inject_hang;
};

/**
 * struct intel_engine_execlists - execlist submission queue and port state
 *
 * The struct intel_engine_execlists represents the combined logical state of
 * the driver and the hardware state for execlist mode of submission.
 */
struct intel_engine_execlists {
	/**
	 * @tasklet: softirq tasklet for bottom handler
	 */
	struct tasklet_struct tasklet;

	/**
	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
	 */
	struct i915_priolist default_priolist;

	/**
	 * @no_priolist: priority lists disabled
	 */
	bool no_priolist;

	/**
	 * @submit_reg: gen-specific execlist submission register
	 * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
	 * the ExecList Submission Queue Contents register array for Gen11+
	 */
	u32 __iomem *submit_reg;

	/**
	 * @ctrl_reg: the enhanced execlists control register, used to load the
	 * submit queue on the HW and to request preemptions to idle
	 */
	u32 __iomem *ctrl_reg;

	/**
	 * @port: execlist port states
	 *
	 * For each hardware ELSP (ExecList Submission Port) we keep
	 * track of the last request and the number of times we submitted
	 * that port to hw. We then count the number of times the hw reports
	 * a context completion or preemption. As only one context can
	 * be active on hw, we limit resubmission of context to port[0]. This
	 * is called a Lite Restore of the context.
	 */
	struct execlist_port {
		/**
		 * @request_count: combined request and submission count
		 */
		struct i915_request *request_count;
#define EXECLIST_COUNT_BITS 2
#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
#define port_set(p, packed) ((p)->request_count = (packed))
#define port_isset(p) ((p)->request_count)
#define port_index(p, execlists) ((p) - (execlists)->port)

		/**
		 * @context_id: context ID for port
		 */
		GEM_DEBUG_DECL(u32 context_id);

#define EXECLIST_MAX_PORTS 2
	} port[EXECLIST_MAX_PORTS];
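
	/*
	 * Illustrative sketch of the count encoding above (not the actual
	 * scheduler): submitting a request to the ELSP stores it with a
	 * count of 1; a Lite Restore resubmission of the same context bumps
	 * the count instead of replacing the request, e.g.
	 *
	 *	port_set(&port[0], port_pack(rq, 1));	// first submission
	 *	...
	 *	port_set(&port[0], port_pack(rq, 2));	// lite restore
	 *
	 * Each completion event reported by the HW then drops the count
	 * until the port can be retired.
	 */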

	/**
	 * @active: is the HW active? We consider the HW as active after
	 * submitting any context for execution and until we have seen the
	 * last context completion event. After that, we do not expect any
	 * more events until we submit, and so can park the HW.
	 *
	 * As we have a small number of different sources from which we feed
	 * the HW, we track the state of each inside a single bitfield.
	 */
	unsigned int active;
#define EXECLISTS_ACTIVE_USER 0
#define EXECLISTS_ACTIVE_PREEMPT 1
#define EXECLISTS_ACTIVE_HWACK 2

	/**
	 * @port_mask: number of execlist ports - 1
	 */
	unsigned int port_mask;

	/**
	 * @queue_priority: Highest pending priority.
	 *
	 * When we add requests into the queue, or adjust the priority of
	 * executing requests, we compute the maximum priority of those
	 * pending requests. We can then use this value to determine if
	 * we need to preempt the executing requests to service the queue.
	 */
	int queue_priority;

	/**
	 * @queue: queue of requests, in priority lists
	 */
	struct rb_root_cached queue;

	/**
	 * @csb_read: control register for Context Switch buffer
	 *
	 * Note this register is always in mmio.
	 */
	u32 __iomem *csb_read;

	/**
	 * @csb_write: control register for Context Switch buffer
	 *
	 * Note this register may be either mmio or HWSP shadow.
	 */
	u32 *csb_write;

	/**
	 * @csb_status: status array for Context Switch buffer
	 *
	 * Note these registers may be either mmio or HWSP shadow.
	 */
	u32 *csb_status;

	/**
	 * @preempt_complete_status: expected CSB upon completing preemption
	 */
	u32 preempt_complete_status;

	/**
	 * @csb_write_reset: reset value for CSB write pointer
	 *
	 * As the CSB write pointer may be either in HWSP or as a field
	 * inside an mmio register, we want to reprogram it slightly
	 * differently to avoid later confusion.
	 */
	u32 csb_write_reset;

	/**
	 * @csb_head: context status buffer head
	 */
	u8 csb_head;

	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
};

#define INTEL_ENGINE_CS_MAX_NAME 8

struct intel_engine_cs {
	struct drm_i915_private *i915;
	char name[INTEL_ENGINE_CS_MAX_NAME];

	enum intel_engine_id id;
	unsigned int hw_id;
	unsigned int guc_id;

	u8 uabi_id;
	u8 uabi_class;

	u8 class;
	u8 instance;
	u32 context_size;
	u32 mmio_base;

	struct intel_ring *buffer;

	struct i915_timeline timeline;

	struct drm_i915_gem_object *default_state;
	void *pinned_default_state;

	unsigned long irq_posted;
#define ENGINE_IRQ_BREADCRUMB 0

	/* Rather than have every client wait upon all user interrupts,
	 * with the herd waking after every interrupt and each doing the
	 * heavyweight seqno dance, we delegate the task (of being the
	 * bottom-half of the user interrupt) to the first client. After
	 * every interrupt, we wake up one client, who does the heavyweight
	 * coherent seqno read and either goes back to sleep (if incomplete),
	 * or wakes up all the completed clients in parallel, before then
	 * transferring the bottom-half status to the next client in the queue.
	 *
	 * Compared to walking the entire list of waiters in a single dedicated
	 * bottom-half, we reduce the latency of the first waiter by avoiding
	 * a context switch, but incur additional coherent seqno reads when
	 * following the chain of request breadcrumbs. Since it is most likely
	 * that we have a single client waiting on each seqno, then reducing
	 * the overhead of waking that client is much preferred.
	 */
	struct intel_breadcrumbs {
		spinlock_t irq_lock; /* protects irq_*; irqsafe */
		struct intel_wait *irq_wait; /* oldest waiter by retirement */

		spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
		struct rb_root waiters; /* sorted by retirement, priority */
		struct list_head signals; /* sorted by retirement */
		struct task_struct *signaler; /* used for fence signalling */

		struct timer_list fake_irq; /* used after a missed interrupt */
		struct timer_list hangcheck; /* detect missed interrupts */

		unsigned int hangcheck_interrupts;
		unsigned int irq_enabled;
		unsigned int irq_count;

		bool irq_armed : 1;
		I915_SELFTEST_DECLARE(bool mock : 1);
	} breadcrumbs;

	struct {
		/**
		 * @enable: Bitmask of enable sample events on this engine.
		 *
		 * Bits correspond to sample event types, for instance
		 * I915_SAMPLE_QUEUED is bit 0 etc.
		 */
		u32 enable;
		/**
		 * @enable_count: Reference count for the enabled samplers.
		 *
		 * Index number corresponds to the bit number from @enable.
		 */
		unsigned int enable_count[I915_PMU_SAMPLE_BITS];
		/**
		 * @sample: Counter values for sampling events.
		 *
		 * Our internal timer stores the current counters in this field.
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
	} pmu;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 */
	struct i915_gem_batch_pool batch_pool;

	struct intel_hw_status_page status_page;
	struct i915_ctx_workarounds wa_ctx;
	struct i915_vma *scratch;

	u32 irq_keep_mask; /* always keep these interrupts */
	u32 irq_enable_mask; /* bitmask to enable ring interrupt */
	void (*irq_enable)(struct intel_engine_cs *engine);
	void (*irq_disable)(struct intel_engine_cs *engine);

	int (*init_hw)(struct intel_engine_cs *engine);

	struct {
		struct i915_request *(*prepare)(struct intel_engine_cs *engine);
		void (*reset)(struct intel_engine_cs *engine,
			      struct i915_request *rq);
		void (*finish)(struct intel_engine_cs *engine);
	} reset;

	void (*park)(struct intel_engine_cs *engine);
	void (*unpark)(struct intel_engine_cs *engine);

	void (*set_default_submission)(struct intel_engine_cs *engine);

	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
					     struct i915_gem_context *ctx);

	int (*request_alloc)(struct i915_request *rq);
	int (*init_context)(struct i915_request *rq);

	int (*emit_flush)(struct i915_request *request, u32 mode);
#define EMIT_INVALIDATE	BIT(0)
#define EMIT_FLUSH	BIT(1)
#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
	int (*emit_bb_start)(struct i915_request *rq,
			     u64 offset, u32 length,
			     unsigned int dispatch_flags);
#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)
	void (*emit_breadcrumb)(struct i915_request *rq, u32 *cs);
	int emit_breadcrumb_sz;

	/* Pass the request to the hardware queue (e.g. directly into
	 * the legacy ringbuffer or to the end of an execlist).
	 *
	 * This is called from an atomic context with irqs disabled; must
	 * be irq-safe.
	 */
	void (*submit_request)(struct i915_request *rq);

	/* Call when the priority on a request has changed and it and its
	 * dependencies may need rescheduling. Note the request itself may
	 * not be ready to run!
	 *
	 * Called under the struct_mutex.
	 */
	void (*schedule)(struct i915_request *request,
			 const struct i915_sched_attr *attr);

	/*
	 * Cancel all requests on the hardware, or queued for execution.
	 * This should only cancel the ready requests that have been
	 * submitted to the engine (via the engine->submit_request callback).
	 * This is called when marking the device as wedged.
	 */
	void (*cancel_requests)(struct intel_engine_cs *engine);

	/* Some chipsets are not quite as coherent as advertised and need
	 * an expensive kick to force a true read of the up-to-date seqno.
	 * However, the up-to-date seqno is not always required and the last
	 * seen value is good enough. Note that the seqno will always be
	 * monotonic, even if not coherent.
	 */
	void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
	void (*cleanup)(struct intel_engine_cs *engine);

	/* GEN8 signal/wait table - never trust comments!
	 *	  signal to	signal to	signal to	signal to	signal to
	 *	    RCS		   VCS		   BCS		   VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) |  NOP (0x90) | VCS2 (0x98) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
	 *  ie. transpose of g(x, y)
	 *
	 *	 sync from	sync from	sync from	sync from	sync from
	 *	    RCS		   VCS		   BCS		   VECS		 VCS2
	 *      --------------------------------------------------------------------
	 *  RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
	 *	|-------------------------------------------------------------------
	 *  VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
	 *	|-------------------------------------------------------------------
	 *  BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
	 *	|-------------------------------------------------------------------
	 * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) |  NOP (0x90) | VCS2 (0xb8) |
	 *	|-------------------------------------------------------------------
	 * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) |  NOP (0xc0) |
	 *	|-------------------------------------------------------------------
	 *
	 * Generalization:
	 *  g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
	 *  ie. transpose of f(x, y)
	 */
	struct {
#define GEN6_SEMAPHORE_LAST	VECS_HW
#define GEN6_NUM_SEMAPHORES	(GEN6_SEMAPHORE_LAST + 1)
#define GEN6_SEMAPHORES_MASK	GENMASK(GEN6_SEMAPHORE_LAST, 0)
		struct {
			/* our mbox written by others */
			u32 wait[GEN6_NUM_SEMAPHORES];
			/* mboxes this ring signals to */
			i915_reg_t signal[GEN6_NUM_SEMAPHORES];
		} mbox;

		/* AKA wait() */
		int (*sync_to)(struct i915_request *rq,
			       struct i915_request *signal);
		u32 *(*signal)(struct i915_request *rq, u32 *cs);
	} semaphore;
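
	/*
	 * Worked example for the tables above (illustrative): with
	 * seqno_size = 8, NUM_RINGS = 5, and ids RCS = 0, VCS = 1:
	 *
	 *	f(RCS, VCS) = 0 * 5 * 8 + 8 * 1 = 0x08	(RCS signals VCS)
	 *	g(RCS, VCS) = 1 * 5 * 8 + 8 * 0 = 0x28	(RCS syncs from VCS)
	 *
	 * matching the 0x08 and 0x28 entries in the signal and sync tables.
	 */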

	struct intel_engine_execlists execlists;

	/* Contexts are pinned whilst they are active on the GPU. The last
	 * context executed remains active whilst the GPU is idle - the
	 * switch away and write to the context object only occurs on the
	 * next execution. Contexts are only unpinned on retirement of the
	 * following request ensuring that we can always write to the object
	 * on the context switch even after idling. Across suspend, we switch
	 * to the kernel context and trash it as the save may not happen
	 * before the hardware is powered down.
	 */
	struct intel_context *last_retired_context;

	/* status_notifier: list of callbacks for context-switch changes */
	struct atomic_notifier_head context_status_notifier;

	struct intel_engine_hangcheck hangcheck;

#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
#define I915_ENGINE_SUPPORTS_STATS   BIT(1)
#define I915_ENGINE_HAS_PREEMPTION   BIT(2)
	unsigned int flags;

	/*
	 * Table of commands the command parser needs to know about
	 * for this engine.
	 */
	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);

	/*
	 * Table of registers allowed in commands that read/write registers.
	 */
	const struct drm_i915_reg_table *reg_tables;
	int reg_table_count;

	/*
	 * Returns the bitmask for the length field of the specified command.
	 * Return 0 for an unrecognized/invalid command.
	 *
	 * If the command parser finds an entry for a command in the engine's
	 * cmd_tables, it gets the command's length based on the table entry.
	 * If not, it calls this function to determine the per-engine length
	 * field encoding for the command (i.e. different opcode ranges use
	 * certain bits to encode the command length in the header).
	 */
	u32 (*get_cmd_length_mask)(u32 cmd_header);

	struct {
		/**
		 * @lock: Lock protecting the below fields.
		 */
		seqlock_t lock;
		/**
		 * @enabled: Reference count indicating number of listeners.
		 */
		unsigned int enabled;
		/**
		 * @active: Number of contexts currently scheduled in.
		 */
		unsigned int active;
		/**
		 * @enabled_at: Timestamp when busy stats were enabled.
		 */
		ktime_t enabled_at;
		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active is active > 0.
		 */
		ktime_t start;
		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated time not counting the most recent block in cases
		 * where engine is currently busy (active > 0).
		 */
		ktime_t total;
	} stats;
};

static inline bool
intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
}

static inline bool
intel_engine_supports_stats(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_SUPPORTS_STATS;
}

static inline bool
intel_engine_has_preemption(const struct intel_engine_cs *engine)
{
	return engine->flags & I915_ENGINE_HAS_PREEMPTION;
}

static inline bool __execlists_need_preempt(int prio, int last)
{
	return prio > max(0, last);
}
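
/*
 * Illustrative note: with I915_PRIORITY_NORMAL == 0, a queued priority of 1
 * preempts a running priority of 0, but a queued priority of 0 does not
 * preempt a running priority of -1; clamping @last to 0 means only
 * above-normal priority work can trigger preemption at all.
 */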

static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
	execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}

void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

void
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);

static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}

static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}
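
/*
 * Illustrative sketch of how a CSB handler might retire port 0 on a
 * context-complete event (the driver's real logic lives in intel_lrc.c):
 *
 *	unsigned int count;
 *	struct i915_request *rq = port_unpack(&port[0], &count);
 *
 *	if (--count == 0) {
 *		i915_request_put(rq);
 *		execlists_port_complete(execlists, &port[0]);
 *	} else {
 *		port_set(&port[0], port_pack(rq, count));
 *	}
 */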

static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
	return BIT(engine->id);
}

static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.page_addr[reg]);
}

static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.page_addr[reg]);
		engine->status_page.page_addr[reg] = value;
		clflush(&engine->status_page.page_addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.page_addr[reg], value);
	}
}

/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX		0x30
#define I915_GEM_HWS_INDEX_ADDR		(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_PREEMPT_INDEX	0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
#define I915_GEM_HWS_SCRATCH_INDEX	0x40
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f
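
/*
 * Illustrative arithmetic (assuming MI_STORE_DWORD_INDEX_SHIFT == 2, per
 * intel_gpu_commands.h): a dword index converts to a byte offset within
 * the HWS page, e.g.
 *
 *	I915_GEM_HWS_INDEX_ADDR = 0x30 << 2 = 0xc0
 */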

struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 unsigned int size);
int intel_ring_pin(struct intel_ring *ring,
		   struct drm_i915_private *i915,
		   unsigned int offset_bias);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct intel_ring *ring);

void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);

static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
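
/*
 * Illustrative sketch of the begin/emit/advance contract (the dword
 * count here is arbitrary):
 *
 *	u32 *cs;
 *
 *	cs = intel_ring_begin(rq, 2);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_NOOP;
 *	*cs++ = MI_NOOP;
 *
 *	intel_ring_advance(rq, cs);
 */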

static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}

static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}

static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}

static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}

static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);

void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_engine_create_scratch(struct intel_engine_cs *engine,
				unsigned int size);
void intel_engine_cleanup_scratch(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
}

static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
{
	/* We are only peeking at the tail of the submit queue (and not the
	 * queue itself) in order to gain a hint as to the current active
	 * state of the engine. Callers are not expected to be taking
	 * engine->timeline->lock, nor are they expected to be concerned
	 * with serialising this hint with anything, so document it as
	 * a hint and nothing more.
	 */
	return READ_ONCE(engine->timeline.seqno);
}

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

/*
 * Arbitrary size for largest possible 'add request' sequence. The code paths
 * are complex and variable. Empirical measurement shows that the worst case
 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
 * we need to allocate double the largest single packet within that emission
 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
 */
#define MIN_SPACE_FOR_ADD_REQUEST 336
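/* i.e. (6 + 6 + 72) dwords = 84 dwords * sizeof(u32) = 336 bytes */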

static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
}

static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
{
	return engine->status_page.ggtt_offset + I915_GEM_HWS_PREEMPT_ADDR;
}

/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);

static inline void intel_wait_init(struct intel_wait *wait)
{
	wait->tsk = current;
	wait->request = NULL;
}

static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->tsk = current;
	wait->seqno = seqno;
}

static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
{
	return wait->seqno;
}

static inline bool
intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
{
	wait->seqno = seqno;
	return intel_wait_has_seqno(wait);
}

static inline bool
intel_wait_update_request(struct intel_wait *wait,
			  const struct i915_request *rq)
{
	return intel_wait_update_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool
intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
{
	return wait->seqno == seqno;
}

static inline bool
intel_wait_check_request(const struct intel_wait *wait,
			 const struct i915_request *rq)
{
	return intel_wait_check_seqno(wait, i915_request_global_seqno(rq));
}

static inline bool intel_wait_complete(const struct intel_wait *wait)
{
	return RB_EMPTY_NODE(&wait->node);
}

bool intel_engine_add_wait(struct intel_engine_cs *engine,
			   struct intel_wait *wait);
void intel_engine_remove_wait(struct intel_engine_cs *engine,
			      struct intel_wait *wait);
bool intel_engine_enable_signaling(struct i915_request *request, bool wakeup);
void intel_engine_cancel_signaling(struct i915_request *request);
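
/*
 * Illustrative waiter lifecycle (sketch only; the canonical user is
 * i915_request_wait() in i915_request.c):
 *
 *	struct intel_wait wait;
 *
 *	intel_wait_init(&wait);
 *	intel_engine_add_wait(engine, &wait);
 *	while (!i915_request_completed(rq))
 *		... sleep until the breadcrumb interrupt wakes us ...
 *	intel_engine_remove_wait(engine, &wait);
 */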

static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
	return READ_ONCE(engine->breadcrumbs.irq_wait);
}

unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
#define ENGINE_WAKEUP_WAITER BIT(0)
#define ENGINE_WAKEUP_ASLEEP BIT(1)

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}

static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_QW_WRITE;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}

static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
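
/*
 * Illustrative sketch: a gen8-style breadcrumb could use the helper above
 * to write the request's global seqno into the HWS, e.g.
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->global_seqno,
 *				  intel_hws_seqno_address(rq->engine));
 *
 * (The driver's real breadcrumb emission lives in intel_lrc.c and
 * intel_ringbuffer.c.)
 */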

void intel_engines_sanitize(struct drm_i915_private *i915);

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);

static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
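
/*
 * Illustrative usage (sketch only): sample an engine's busy time over
 * roughly one second.
 *
 *	ktime_t t0, t1;
 *
 *	if (intel_enable_engine_stats(engine) == 0) {
 *		t0 = intel_engine_get_busy_time(engine);
 *		msleep(1000);
 *		t1 = intel_engine_get_busy_time(engine);
 *		intel_disable_engine_stats(engine);
 *		// busy fraction ~= (t1 - t0) / 1 s
 *	}
 */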

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	if (!execlists->preempt_hang.inject_hang)
		return false;

	complete(&execlists->preempt_hang.completion);
	return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	return false;
}

#endif

#endif /* _INTEL_RINGBUFFER_H_ */