/* SPDX-License-Identifier: MIT */
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

#include <drm/drm_util.h>

#include <linux/hashtable.h>
#include <linux/irq_work.h>
#include <linux/random.h>
#include <linux/seqlock.h>

#include "i915_gem_batch_pool.h"
#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"

struct drm_printer;
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some indication as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)

#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)

#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)

#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)

#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)

#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
 * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
 */
enum intel_engine_hangcheck_action {
	ENGINE_IDLE = 0,
	ENGINE_WAIT,
	ENGINE_ACTIVE_SEQNO,
	ENGINE_ACTIVE_HEAD,
	ENGINE_ACTIVE_SUBUNITS,
	ENGINE_WAIT_KICK,
	ENGINE_DEAD,
};
static inline const char *
hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
{
	switch (a) {
	case ENGINE_IDLE:
		return "idle";
	case ENGINE_WAIT:
		return "wait";
	case ENGINE_ACTIVE_SEQNO:
		return "active seqno";
	case ENGINE_ACTIVE_HEAD:
		return "active head";
	case ENGINE_ACTIVE_SUBUNITS:
		return "active subunits";
	case ENGINE_WAIT_KICK:
		return "wait kick";
	case ENGINE_DEAD:
		return "dead";
	}

	return "unknown";
}
void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
static inline bool __execlists_need_preempt(int prio, int last)
{
	/*
	 * Allow preemption of low -> normal -> high, but we do
	 * not allow low priority tasks to preempt other low priority
	 * tasks under the impression that latency for low priority
	 * tasks does not matter (as much as background throughput),
	 * so kiss (keep it simple).
	 *
	 * More naturally we would write
	 *	prio >= max(0, last);
	 * except that we wish to prevent triggering preemption at the same
	 * priority level: the task that is running should remain running
	 * to preserve FIFO ordering of dependencies.
	 */
	return prio > max(I915_PRIORITY_NORMAL - 1, last);
}
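
/*
 * Worked example (illustrative, assuming I915_PRIORITY_NORMAL == 0 via
 * I915_CONTEXT_DEFAULT_PRIORITY): with last == 0, an incoming prio of 0
 * does not preempt, since 0 > max(-1, 0) is false, whereas prio == 1
 * (or anything higher) does.
 */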
static inline void
execlists_set_active(struct intel_engine_execlists *execlists,
		     unsigned int bit)
{
	__set_bit(bit, (unsigned long *)&execlists->active);
}

static inline bool
execlists_set_active_once(struct intel_engine_execlists *execlists,
			  unsigned int bit)
{
	return !__test_and_set_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_active(struct intel_engine_execlists *execlists,
		       unsigned int bit)
{
	__clear_bit(bit, (unsigned long *)&execlists->active);
}

static inline void
execlists_clear_all_active(struct intel_engine_execlists *execlists)
{
	execlists->active = 0;
}

static inline bool
execlists_is_active(const struct intel_engine_execlists *execlists,
		    unsigned int bit)
{
	return test_bit(bit, (unsigned long *)&execlists->active);
}
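
/*
 * Usage sketch (illustrative only, not a definitive driver sequence):
 * the helpers above treat execlists->active as a plain bitmask, e.g.
 * marking the start of user submission exactly once:
 *
 *	if (execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER))
 *		execlists_user_begin(execlists, execlists->port);
 */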
void execlists_user_begin(struct intel_engine_execlists *execlists,
			  const struct execlist_port *port);
void execlists_user_end(struct intel_engine_execlists *execlists);

void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);

struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
static inline unsigned int
execlists_num_ports(const struct intel_engine_execlists * const execlists)
{
	return execlists->port_mask + 1;
}
static inline struct execlist_port *
execlists_port_complete(struct intel_engine_execlists * const execlists,
			struct execlist_port * const port)
{
	const unsigned int m = execlists->port_mask;

	GEM_BUG_ON(port_index(port, execlists) != 0);
	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));

	memmove(port, port + 1, m * sizeof(struct execlist_port));
	memset(port + m, 0, sizeof(struct execlist_port));

	return port;
}
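
/*
 * Example (illustrative): with two ports, completing port[0] memmoves
 * port[1] down into port[0] and zeroes port[1], so the ports always
 * drain in FIFO order from index 0.
 */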
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
	/* Ensure that the compiler doesn't optimize away the load. */
	return READ_ONCE(engine->status_page.addr[reg]);
}
static inline void
intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
	/* Writing into the status page should be done sparingly. Since
	 * we do so when we are uncertain of the device state, we take a bit
	 * of extra paranoia to try and ensure that the HWS takes the value
	 * we give and that it doesn't end up trapped inside the CPU!
	 */
	if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
		mb();
		clflush(&engine->status_page.addr[reg]);
		engine->status_page.addr[reg] = value;
		clflush(&engine->status_page.addr[reg]);
		mb();
	} else {
		WRITE_ONCE(engine->status_page.addr[reg], value);
	}
}
/*
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 * 0x20-0x2f: Reserved (Gen6+)
 *
 * The area from dword 0x30 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_PREEMPT		0x32
#define I915_GEM_HWS_PREEMPT_ADDR	(I915_GEM_HWS_PREEMPT * sizeof(u32))
#define I915_GEM_HWS_HANGCHECK		0x34
#define I915_GEM_HWS_HANGCHECK_ADDR	(I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO		0x40
#define I915_GEM_HWS_SEQNO_ADDR		(I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH		0x80
#define I915_GEM_HWS_SCRATCH_ADDR	(I915_GEM_HWS_SCRATCH * sizeof(u32))

#define I915_HWS_CSB_BUF0_INDEX		0x10
#define I915_HWS_CSB_WRITE_INDEX	0x1f
#define CNL_HWS_CSB_WRITE_INDEX		0x2f
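
/*
 * Example (illustrative): each index above addresses a dword within the
 * status page, and the *_ADDR form is its byte offset, e.g.
 * I915_GEM_HWS_SEQNO_ADDR == 0x40 * sizeof(u32) == 0x100.
 */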
struct intel_ring *
intel_engine_create_ring(struct intel_engine_cs *engine,
			 struct i915_timeline *timeline,
			 int size);
int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
void intel_ring_free(struct kref *ref);
static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	kref_put(&ring->ref, intel_ring_free);
}
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);

void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);

int __must_check intel_ring_cacheline_align(struct i915_request *rq);

u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
{
	/* Dummy function.
	 *
	 * This serves as a placeholder in the code so that the reader
	 * can compare against the preceding intel_ring_begin() and
	 * check that the number of dwords emitted matches the space
	 * reserved for the command packet (i.e. the value passed to
	 * intel_ring_begin()).
	 */
	GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
}
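
/*
 * Usage sketch (illustrative; the packet shown is a hypothetical
 * example, not a specific driver sequence):
 *
 *	cs = intel_ring_begin(rq, 4);
 *	if (IS_ERR(cs))
 *		return PTR_ERR(cs);
 *
 *	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 *	*cs++ = lower_32_bits(addr);
 *	*cs++ = upper_32_bits(addr);
 *	*cs++ = value;
 *
 *	intel_ring_advance(rq, cs);
 *
 * Exactly as many dwords as reserved by intel_ring_begin() must be
 * written before calling intel_ring_advance().
 */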
static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
{
	return pos & (ring->size - 1);
}
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
			unsigned int pos)
{
	if (pos & -ring->size) /* must be strictly within the ring */
		return false;

	if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */
		return false;

	return true;
}
static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	u32 offset = addr - rq->ring->vaddr;

	GEM_BUG_ON(offset > rq->ring->size);
	return intel_ring_wrap(rq->ring, offset);
}
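
/*
 * Worked example (illustrative): with ring->size == 4096, an addr of
 * rq->ring->vaddr + 4096 gives offset == 4096, which intel_ring_wrap()
 * masks back to 0, so ring->size itself is never handed to hardware.
 */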
static inline void
assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
{
	GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));

	/*
	 * "Ring Buffer Use"
	 *	Gen2 BSpec "1. Programming Environment" / 1.4.4.6
	 *	Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5
	 *	Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 *
	 * We use ring->head as the last known location of the actual RING_HEAD,
	 * it may have advanced but in the worst case it is equally the same
	 * as ring->head and so we should never program RING_TAIL to advance
	 * into the same cacheline as ring->head.
	 */
#define cacheline(a) round_down(a, CACHELINE_BYTES)
	GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) &&
		   tail < ring->head);
#undef cacheline
}
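
/*
 * Worked example (illustrative): with CACHELINE_BYTES == 64, tail ==
 * 0x40 and ring->head == 0x44 both round down to cacheline 0x40 while
 * tail < head, which is precisely the case the assertion rejects.
 */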
static inline unsigned int
intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
{
	/* Whilst writes to the tail are strictly ordered, there is no
	 * serialisation between readers and the writers. The tail may be
	 * read by i915_request_retire() just as it is being updated
	 * by execlists, as although the breadcrumb is complete, the context
	 * switch hasn't been seen.
	 */
	assert_ring_tail_valid(ring, tail);
	ring->tail = tail;
	return tail;
}
static inline unsigned int
__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	/*
	 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
	 * same cacheline, the Head Pointer must not be greater than the Tail
	 * Pointer."
	 */
	GEM_BUG_ON(!is_power_of_2(size));
	return (head - tail - CACHELINE_BYTES) & (size - 1);
}
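
/*
 * Worked example (illustrative): for head == tail and size == 4096,
 * (0 - 0 - 64) & 4095 == 4032, i.e. one cacheline is always held in
 * reserve so that the tail can never quite catch up to the head.
 */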
int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);

int intel_init_render_ring_buffer(struct intel_engine_cs *engine);
int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine);
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);

int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);

void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);

void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone);

void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);

bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
{
	irq_work_queue(&engine->breadcrumbs.irq_work);
}
bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);

void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);

void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine,
				    struct drm_printer *p);
static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
	memset(batch, 0, 6 * sizeof(u32));

	batch[0] = GFX_OP_PIPE_CONTROL(6);
	batch[1] = flags;
	batch[2] = offset;

	return batch + 6;
}
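
/*
 * Example (illustrative): the memset leaves the remaining dwords of the
 * 6-dword PIPE_CONTROL zeroed, so callers only specify @flags and the
 * post-sync @offset, e.g.
 *
 *	cs = gen8_emit_pipe_control(cs,
 *				    PIPE_CONTROL_CS_STALL |
 *				    PIPE_CONTROL_QW_WRITE,
 *				    gtt_offset);
 */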
static inline u32 *
gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* We're using qword write, offset should be aligned to 8 bytes. */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	/* w/a for post sync ops following a GPGPU operation we
	 * need a prior CS_STALL, which is emitted by the flush
	 * following the batch.
	 */
	*cs++ = GFX_OP_PIPE_CONTROL(6);
	*cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
	*cs++ = gtt_offset;
	*cs++ = 0;
	*cs++ = value;
	/* We're thrashing one dword of HWS. */
	*cs++ = 0;

	return cs;
}
static inline u32 *
gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
	GEM_BUG_ON(gtt_offset & (1 << 5));
	/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
	GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));

	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
	*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
	*cs++ = 0;
	*cs++ = value;

	return cs;
}
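
/*
 * Usage sketch (illustrative; hwsp_offset is a hypothetical GGTT
 * address of a status dword, e.g. the status page base plus
 * I915_GEM_HWS_SEQNO_ADDR):
 *
 *	cs = gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset, 0);
 *
 * Any 64-byte-aligned offset satisfies both assertions above (bit 5
 * clear, qword aligned).
 */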
static inline void intel_engine_reset(struct intel_engine_cs *engine,
				      bool stalled)
{
	if (engine->reset.reset)
		engine->reset.reset(engine, stalled);
}
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);

bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);

void intel_engine_lost_context(struct intel_engine_cs *engine);

void intel_engines_park(struct drm_i915_private *i915);
void intel_engines_unpark(struct drm_i915_private *i915);

void intel_engines_reset_default_submission(struct drm_i915_private *i915);
unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915);

bool intel_engine_can_store_dword(struct intel_engine_cs *engine);

__printf(3, 4)
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...);

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance);
static inline void intel_engine_context_in(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		if (engine->stats.active++ == 0)
			engine->stats.start = ktime_get();
		GEM_BUG_ON(engine->stats.active == 0);
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
static inline void intel_engine_context_out(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (READ_ONCE(engine->stats.enabled) == 0)
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (engine->stats.enabled > 0) {
		ktime_t last;

		if (engine->stats.active && --engine->stats.active == 0) {
			/*
			 * Decrement the active context count and in case GPU
			 * is now idle add up to the running total.
			 */
			last = ktime_sub(ktime_get(), engine->stats.start);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		} else if (engine->stats.active == 0) {
			/*
			 * After turning on engine stats, context out might be
			 * the first event in which case we account from the
			 * time stats gathering was turned on.
			 */
			last = ktime_sub(ktime_get(), engine->stats.enabled_at);

			engine->stats.total = ktime_add(engine->stats.total,
							last);
		}
	}

	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
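
/*
 * Worked example (illustrative): with stats enabled at t0, a context
 * scheduled in at t1 and out at t2 adds (t2 - t1) to stats.total; if
 * the first event seen after enabling is a context-out at t2, the busy
 * time is instead accounted from enabled_at, i.e. (t2 - t0).
 */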
int intel_enable_engine_stats(struct intel_engine_cs *engine);
void intel_disable_engine_stats(struct intel_engine_cs *engine);

ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	if (!execlists->preempt_hang.inject_hang)
		return false;

	complete(&execlists->preempt_hang.completion);
	return true;
}

#else

static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
{
	return false;
}

#endif
static inline u32
intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
{
	return engine->hangcheck.next_seqno =
		next_pseudo_random32(engine->hangcheck.next_seqno);
}

static inline u32
intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
{
	return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
}
#endif /* _INTEL_RINGBUFFER_H_ */