/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Keith Packard <keithp@keithp.com>
 *
 */
#include <linux/sched/mm.h>
#include <linux/sort.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>

#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbc.h"
#include "display/intel_hdcp.h"
#include "display/intel_hdmi.h"
#include "display/intel_psr.h"

#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/uc/intel_guc_submission.h"

#include "i915_debugfs.h"
#include "i915_trace.h"
#include "intel_csr.h"
#include "intel_sideband.h"
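
/*
 * Each debugfs entry below resolves its drm_info_node back to the owning
 * i915 device via node_to_i915(); the files are exposed through the DRM
 * debugfs directory (typically /sys/kernel/debug/dri/<minor>/).
 */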
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
static char get_pin_flag(struct drm_i915_gem_object *obj)
{
	return obj->pin_global ? 'p' : ' ';
}

static char get_tiling_flag(struct drm_i915_gem_object *obj)
{
	switch (i915_gem_object_get_tiling(obj)) {
	default:
	case I915_TILING_NONE: return ' ';
	case I915_TILING_X: return 'X';
	case I915_TILING_Y: return 'Y';
	}
}

static char get_global_flag(struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
}

static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
	return obj->mm.mapping ? 'M' : ' ';
}
static const char *
stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
{
	size_t x = 0;

	switch (page_sizes) {
	case I915_GTT_PAGE_SIZE_4K:
		return "4K";
	case I915_GTT_PAGE_SIZE_64K:
		return "64K";
	case I915_GTT_PAGE_SIZE_2M:
		return "2M";
	default:
		if (page_sizes & I915_GTT_PAGE_SIZE_2M)
			x += snprintf(buf + x, len - x, "2M, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_64K)
			x += snprintf(buf + x, len - x, "64K, ");
		if (page_sizes & I915_GTT_PAGE_SIZE_4K)
			x += snprintf(buf + x, len - x, "4K, ");
		return buf;
	}
}
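
/*
 * describe_obj() prints one line per GEM object. The single-character flags
 * come from the helpers above: 'p' when the object is pinned globally, X/Y
 * for the tiling mode, 'g' when a userspace GGTT fault is outstanding and
 * 'M' when a kernel mapping is held; the per-VMA details follow.
 */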
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;
143 seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s",
146 get_tiling_flag(obj),
147 get_global_flag(obj),
148 get_pin_mapped_flag(obj),
149 obj->base.size / 1024,
152 i915_cache_level_str(dev_priv, obj->cache_level),
153 obj->mm.dirty ? " dirty" : "",
154 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
156 seq_printf(m, " (name: %d)", obj->base.name);
158 spin_lock(&obj->vma.lock);
159 list_for_each_entry(vma, &obj->vma.list, obj_link) {
160 if (!drm_mm_node_allocated(&vma->node))
163 spin_unlock(&obj->vma.lock);
165 if (i915_vma_is_pinned(vma))
168 seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
169 i915_vma_is_ggtt(vma) ? "g" : "pp",
170 vma->node.start, vma->node.size,
171 stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
172 if (i915_vma_is_ggtt(vma)) {
173 switch (vma->ggtt_view.type) {
174 case I915_GGTT_VIEW_NORMAL:
175 seq_puts(m, ", normal");
178 case I915_GGTT_VIEW_PARTIAL:
179 seq_printf(m, ", partial [%08llx+%x]",
180 vma->ggtt_view.partial.offset << PAGE_SHIFT,
181 vma->ggtt_view.partial.size << PAGE_SHIFT);
184 case I915_GGTT_VIEW_ROTATED:
185 seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
186 vma->ggtt_view.rotated.plane[0].width,
187 vma->ggtt_view.rotated.plane[0].height,
188 vma->ggtt_view.rotated.plane[0].stride,
189 vma->ggtt_view.rotated.plane[0].offset,
190 vma->ggtt_view.rotated.plane[1].width,
191 vma->ggtt_view.rotated.plane[1].height,
192 vma->ggtt_view.rotated.plane[1].stride,
193 vma->ggtt_view.rotated.plane[1].offset);
196 case I915_GGTT_VIEW_REMAPPED:
197 seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
198 vma->ggtt_view.remapped.plane[0].width,
199 vma->ggtt_view.remapped.plane[0].height,
200 vma->ggtt_view.remapped.plane[0].stride,
201 vma->ggtt_view.remapped.plane[0].offset,
202 vma->ggtt_view.remapped.plane[1].width,
203 vma->ggtt_view.remapped.plane[1].height,
204 vma->ggtt_view.remapped.plane[1].stride,
205 vma->ggtt_view.remapped.plane[1].offset);
209 MISSING_CASE(vma->ggtt_view.type);
214 seq_printf(m, " , fence: %d", vma->fence->id);
217 spin_lock(&obj->vma.lock);
219 spin_unlock(&obj->vma.lock);
221 seq_printf(m, " (pinned x %d)", pin_count);
223 seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
225 seq_printf(m, " (global)");
227 engine = i915_gem_object_last_write_engine(obj);
229 seq_printf(m, " (%s)", engine->name);
struct file_stats {
	struct i915_address_space *vm;
	unsigned long count;
	u64 total, unbound;
	u64 active, inactive;
	u64 closed;
};
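
/*
 * per_file_stats() is walked over every object of a client (see
 * idr_for_each() below) and accumulates object sizes into the buckets
 * above: total, active/inactive on the GPU, unbound and closed.
 */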
240 static int per_file_stats(int id, void *ptr, void *data)
242 struct drm_i915_gem_object *obj = ptr;
243 struct file_stats *stats = data;
244 struct i915_vma *vma;
247 stats->total += obj->base.size;
248 if (!atomic_read(&obj->bind_count))
249 stats->unbound += obj->base.size;
251 spin_lock(&obj->vma.lock);
253 for_each_ggtt_vma(vma, obj) {
254 if (!drm_mm_node_allocated(&vma->node))
257 if (i915_vma_is_active(vma))
258 stats->active += vma->node.size;
260 stats->inactive += vma->node.size;
262 if (i915_vma_is_closed(vma))
263 stats->closed += vma->node.size;
266 struct rb_node *p = obj->vma.tree.rb_node;
271 vma = rb_entry(p, typeof(*vma), obj_node);
272 cmp = i915_vma_compare(vma, stats->vm, NULL);
274 if (drm_mm_node_allocated(&vma->node)) {
275 if (i915_vma_is_active(vma))
276 stats->active += vma->node.size;
278 stats->inactive += vma->node.size;
280 if (i915_vma_is_closed(vma))
281 stats->closed += vma->node.size;
291 spin_unlock(&obj->vma.lock);
296 #define print_file_stats(m, name, stats) do { \
298 seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
308 static void print_context_stats(struct seq_file *m,
309 struct drm_i915_private *i915)
311 struct file_stats kstats = {};
312 struct i915_gem_context *ctx;
314 list_for_each_entry(ctx, &i915->contexts.list, link) {
315 struct i915_gem_engines_iter it;
316 struct intel_context *ce;
318 for_each_gem_engine(ce,
319 i915_gem_context_lock_engines(ctx), it) {
320 intel_context_lock_pinned(ce);
321 if (intel_context_is_pinned(ce)) {
324 ce->state->obj, &kstats);
325 per_file_stats(0, ce->ring->vma->obj, &kstats);
327 intel_context_unlock_pinned(ce);
329 i915_gem_context_unlock_engines(ctx);
331 if (!IS_ERR_OR_NULL(ctx->file_priv)) {
332 struct file_stats stats = { .vm = ctx->vm, };
333 struct drm_file *file = ctx->file_priv->file;
334 struct task_struct *task;
337 spin_lock(&file->table_lock);
338 idr_for_each(&file->object_idr, per_file_stats, &stats);
339 spin_unlock(&file->table_lock);
342 task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
343 snprintf(name, sizeof(name), "%s",
344 task ? task->comm : "<unknown>");
347 print_file_stats(m, name, stats);
351 print_file_stats(m, "[k]contexts", kstats);
354 static int i915_gem_object_info(struct seq_file *m, void *data)
356 struct drm_i915_private *i915 = node_to_i915(m->private);
359 seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
360 i915->mm.shrink_count,
361 atomic_read(&i915->mm.free_count),
362 i915->mm.shrink_memory);
366 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
370 print_context_stats(m, i915);
371 mutex_unlock(&i915->drm.struct_mutex);
376 static void gen8_display_interrupt_info(struct seq_file *m)
378 struct drm_i915_private *dev_priv = node_to_i915(m->private);
381 for_each_pipe(dev_priv, pipe) {
382 enum intel_display_power_domain power_domain;
383 intel_wakeref_t wakeref;
385 power_domain = POWER_DOMAIN_PIPE(pipe);
386 wakeref = intel_display_power_get_if_enabled(dev_priv,
389 seq_printf(m, "Pipe %c power disabled\n",
393 seq_printf(m, "Pipe %c IMR:\t%08x\n",
395 I915_READ(GEN8_DE_PIPE_IMR(pipe)));
396 seq_printf(m, "Pipe %c IIR:\t%08x\n",
398 I915_READ(GEN8_DE_PIPE_IIR(pipe)));
399 seq_printf(m, "Pipe %c IER:\t%08x\n",
401 I915_READ(GEN8_DE_PIPE_IER(pipe)));
403 intel_display_power_put(dev_priv, power_domain, wakeref);
406 seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
407 I915_READ(GEN8_DE_PORT_IMR));
408 seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
409 I915_READ(GEN8_DE_PORT_IIR));
410 seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
411 I915_READ(GEN8_DE_PORT_IER));
413 seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
414 I915_READ(GEN8_DE_MISC_IMR));
415 seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
416 I915_READ(GEN8_DE_MISC_IIR));
417 seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
418 I915_READ(GEN8_DE_MISC_IER));
420 seq_printf(m, "PCU interrupt mask:\t%08x\n",
421 I915_READ(GEN8_PCU_IMR));
422 seq_printf(m, "PCU interrupt identity:\t%08x\n",
423 I915_READ(GEN8_PCU_IIR));
424 seq_printf(m, "PCU interrupt enable:\t%08x\n",
425 I915_READ(GEN8_PCU_IER));
428 static int i915_interrupt_info(struct seq_file *m, void *data)
430 struct drm_i915_private *dev_priv = node_to_i915(m->private);
431 struct intel_engine_cs *engine;
432 intel_wakeref_t wakeref;
435 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
437 if (IS_CHERRYVIEW(dev_priv)) {
438 intel_wakeref_t pref;
440 seq_printf(m, "Master Interrupt Control:\t%08x\n",
441 I915_READ(GEN8_MASTER_IRQ));
443 seq_printf(m, "Display IER:\t%08x\n",
445 seq_printf(m, "Display IIR:\t%08x\n",
447 seq_printf(m, "Display IIR_RW:\t%08x\n",
448 I915_READ(VLV_IIR_RW));
449 seq_printf(m, "Display IMR:\t%08x\n",
451 for_each_pipe(dev_priv, pipe) {
452 enum intel_display_power_domain power_domain;
454 power_domain = POWER_DOMAIN_PIPE(pipe);
455 pref = intel_display_power_get_if_enabled(dev_priv,
458 seq_printf(m, "Pipe %c power disabled\n",
463 seq_printf(m, "Pipe %c stat:\t%08x\n",
465 I915_READ(PIPESTAT(pipe)));
467 intel_display_power_put(dev_priv, power_domain, pref);
470 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
471 seq_printf(m, "Port hotplug:\t%08x\n",
472 I915_READ(PORT_HOTPLUG_EN));
473 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
474 I915_READ(VLV_DPFLIPSTAT));
475 seq_printf(m, "DPINVGTT:\t%08x\n",
476 I915_READ(DPINVGTT));
477 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
479 for (i = 0; i < 4; i++) {
480 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
481 i, I915_READ(GEN8_GT_IMR(i)));
482 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
483 i, I915_READ(GEN8_GT_IIR(i)));
484 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
485 i, I915_READ(GEN8_GT_IER(i)));
488 seq_printf(m, "PCU interrupt mask:\t%08x\n",
489 I915_READ(GEN8_PCU_IMR));
490 seq_printf(m, "PCU interrupt identity:\t%08x\n",
491 I915_READ(GEN8_PCU_IIR));
492 seq_printf(m, "PCU interrupt enable:\t%08x\n",
493 I915_READ(GEN8_PCU_IER));
494 } else if (INTEL_GEN(dev_priv) >= 11) {
495 seq_printf(m, "Master Interrupt Control: %08x\n",
496 I915_READ(GEN11_GFX_MSTR_IRQ));
498 seq_printf(m, "Render/Copy Intr Enable: %08x\n",
499 I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
500 seq_printf(m, "VCS/VECS Intr Enable: %08x\n",
501 I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
502 seq_printf(m, "GUC/SG Intr Enable:\t %08x\n",
503 I915_READ(GEN11_GUC_SG_INTR_ENABLE));
504 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
505 I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
506 seq_printf(m, "Crypto Intr Enable:\t %08x\n",
507 I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
508 seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n",
509 I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
511 seq_printf(m, "Display Interrupt Control:\t%08x\n",
512 I915_READ(GEN11_DISPLAY_INT_CTL));
514 gen8_display_interrupt_info(m);
515 } else if (INTEL_GEN(dev_priv) >= 8) {
516 seq_printf(m, "Master Interrupt Control:\t%08x\n",
517 I915_READ(GEN8_MASTER_IRQ));
519 for (i = 0; i < 4; i++) {
520 seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
521 i, I915_READ(GEN8_GT_IMR(i)));
522 seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
523 i, I915_READ(GEN8_GT_IIR(i)));
524 seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
525 i, I915_READ(GEN8_GT_IER(i)));
528 gen8_display_interrupt_info(m);
529 } else if (IS_VALLEYVIEW(dev_priv)) {
530 seq_printf(m, "Display IER:\t%08x\n",
532 seq_printf(m, "Display IIR:\t%08x\n",
534 seq_printf(m, "Display IIR_RW:\t%08x\n",
535 I915_READ(VLV_IIR_RW));
536 seq_printf(m, "Display IMR:\t%08x\n",
538 for_each_pipe(dev_priv, pipe) {
539 enum intel_display_power_domain power_domain;
540 intel_wakeref_t pref;
542 power_domain = POWER_DOMAIN_PIPE(pipe);
543 pref = intel_display_power_get_if_enabled(dev_priv,
546 seq_printf(m, "Pipe %c power disabled\n",
551 seq_printf(m, "Pipe %c stat:\t%08x\n",
553 I915_READ(PIPESTAT(pipe)));
554 intel_display_power_put(dev_priv, power_domain, pref);
557 seq_printf(m, "Master IER:\t%08x\n",
558 I915_READ(VLV_MASTER_IER));
560 seq_printf(m, "Render IER:\t%08x\n",
562 seq_printf(m, "Render IIR:\t%08x\n",
564 seq_printf(m, "Render IMR:\t%08x\n",
567 seq_printf(m, "PM IER:\t\t%08x\n",
568 I915_READ(GEN6_PMIER));
569 seq_printf(m, "PM IIR:\t\t%08x\n",
570 I915_READ(GEN6_PMIIR));
571 seq_printf(m, "PM IMR:\t\t%08x\n",
572 I915_READ(GEN6_PMIMR));
574 seq_printf(m, "Port hotplug:\t%08x\n",
575 I915_READ(PORT_HOTPLUG_EN));
576 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
577 I915_READ(VLV_DPFLIPSTAT));
578 seq_printf(m, "DPINVGTT:\t%08x\n",
579 I915_READ(DPINVGTT));
581 } else if (!HAS_PCH_SPLIT(dev_priv)) {
582 seq_printf(m, "Interrupt enable: %08x\n",
583 I915_READ(GEN2_IER));
584 seq_printf(m, "Interrupt identity: %08x\n",
585 I915_READ(GEN2_IIR));
586 seq_printf(m, "Interrupt mask: %08x\n",
587 I915_READ(GEN2_IMR));
588 for_each_pipe(dev_priv, pipe)
589 seq_printf(m, "Pipe %c stat: %08x\n",
591 I915_READ(PIPESTAT(pipe)));
593 seq_printf(m, "North Display Interrupt enable: %08x\n",
595 seq_printf(m, "North Display Interrupt identity: %08x\n",
597 seq_printf(m, "North Display Interrupt mask: %08x\n",
599 seq_printf(m, "South Display Interrupt enable: %08x\n",
601 seq_printf(m, "South Display Interrupt identity: %08x\n",
603 seq_printf(m, "South Display Interrupt mask: %08x\n",
605 seq_printf(m, "Graphics Interrupt enable: %08x\n",
607 seq_printf(m, "Graphics Interrupt identity: %08x\n",
609 seq_printf(m, "Graphics Interrupt mask: %08x\n",
613 if (INTEL_GEN(dev_priv) >= 11) {
614 seq_printf(m, "RCS Intr Mask:\t %08x\n",
615 I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
616 seq_printf(m, "BCS Intr Mask:\t %08x\n",
617 I915_READ(GEN11_BCS_RSVD_INTR_MASK));
618 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
619 I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
620 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
621 I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
622 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
623 I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
624 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
625 I915_READ(GEN11_GUC_SG_INTR_MASK));
626 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
627 I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
628 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
629 I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
630 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
631 I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
633 } else if (INTEL_GEN(dev_priv) >= 6) {
634 for_each_uabi_engine(engine, dev_priv) {
636 "Graphics Interrupt mask (%s): %08x\n",
637 engine->name, ENGINE_READ(engine, RING_IMR));
641 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	int i;

	seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);

	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = reg->vma;

		seq_printf(m, "Fence %d, pin count = %d, object = ",
			   i, atomic_read(&reg->pin_count));
		if (!vma)
			seq_puts(m, "unused");
		else
			describe_obj(m, vma->obj);
		seq_putc(m, '\n');
	}

	return 0;
}
671 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
672 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
673 size_t count, loff_t *pos)
675 struct i915_gpu_state *error;
679 error = file->private_data;
683 /* Bounce buffer required because of kernfs __user API convenience. */
684 buf = kmalloc(count, GFP_KERNEL);
688 ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
692 if (!copy_to_user(ubuf, buf, ret))
702 static int gpu_state_release(struct inode *inode, struct file *file)
704 i915_gpu_state_put(file->private_data);
708 static int i915_gpu_info_open(struct inode *inode, struct file *file)
710 struct drm_i915_private *i915 = inode->i_private;
711 struct i915_gpu_state *gpu;
712 intel_wakeref_t wakeref;
715 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
716 gpu = i915_capture_gpu_state(i915);
720 file->private_data = gpu;
724 static const struct file_operations i915_gpu_info_fops = {
725 .owner = THIS_MODULE,
726 .open = i915_gpu_info_open,
727 .read = gpu_state_read,
728 .llseek = default_llseek,
729 .release = gpu_state_release,
733 i915_error_state_write(struct file *filp,
734 const char __user *ubuf,
738 struct i915_gpu_state *error = filp->private_data;
743 DRM_DEBUG_DRIVER("Resetting error state\n");
744 i915_reset_error_state(error->i915);
749 static int i915_error_state_open(struct inode *inode, struct file *file)
751 struct i915_gpu_state *error;
753 error = i915_first_error_state(inode->i_private);
755 return PTR_ERR(error);
757 file->private_data = error;
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
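
/*
 * Note the asymmetry between the two nodes above: i915_gpu_info captures a
 * fresh snapshot of the GPU state every time it is opened, while
 * i915_error_state exposes the first recorded error; writing anything to
 * the latter clears the saved error state.
 */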
771 static int i915_frequency_info(struct seq_file *m, void *unused)
773 struct drm_i915_private *dev_priv = node_to_i915(m->private);
774 struct intel_uncore *uncore = &dev_priv->uncore;
775 struct intel_rps *rps = &dev_priv->gt_pm.rps;
776 intel_wakeref_t wakeref;
779 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
781 if (IS_GEN(dev_priv, 5)) {
782 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
783 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
785 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
786 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
787 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
789 seq_printf(m, "Current P-state: %d\n",
790 (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
791 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
792 u32 rpmodectl, freq_sts;
794 rpmodectl = I915_READ(GEN6_RP_CONTROL);
795 seq_printf(m, "Video Turbo Mode: %s\n",
796 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
797 seq_printf(m, "HW control enabled: %s\n",
798 yesno(rpmodectl & GEN6_RP_ENABLE));
799 seq_printf(m, "SW control enabled: %s\n",
800 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
801 GEN6_RP_MEDIA_SW_MODE));
803 vlv_punit_get(dev_priv);
804 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
805 vlv_punit_put(dev_priv);
807 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
808 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
810 seq_printf(m, "actual GPU freq: %d MHz\n",
811 intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
813 seq_printf(m, "current GPU freq: %d MHz\n",
814 intel_gpu_freq(dev_priv, rps->cur_freq));
816 seq_printf(m, "max GPU freq: %d MHz\n",
817 intel_gpu_freq(dev_priv, rps->max_freq));
819 seq_printf(m, "min GPU freq: %d MHz\n",
820 intel_gpu_freq(dev_priv, rps->min_freq));
822 seq_printf(m, "idle GPU freq: %d MHz\n",
823 intel_gpu_freq(dev_priv, rps->idle_freq));
826 "efficient (RPe) frequency: %d MHz\n",
827 intel_gpu_freq(dev_priv, rps->efficient_freq));
828 } else if (INTEL_GEN(dev_priv) >= 6) {
832 u32 rpmodectl, rpinclimit, rpdeclimit;
833 u32 rpstat, cagf, reqf;
834 u32 rpupei, rpcurup, rpprevup;
835 u32 rpdownei, rpcurdown, rpprevdown;
836 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
839 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
840 if (IS_GEN9_LP(dev_priv)) {
841 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
842 gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
844 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
845 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
848 /* RPSTAT1 is in the GT power well */
849 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
851 reqf = I915_READ(GEN6_RPNSWREQ);
852 if (INTEL_GEN(dev_priv) >= 9)
855 reqf &= ~GEN6_TURBO_DISABLE;
856 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
861 reqf = intel_gpu_freq(dev_priv, reqf);
863 rpmodectl = I915_READ(GEN6_RP_CONTROL);
864 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
865 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
867 rpstat = I915_READ(GEN6_RPSTAT1);
868 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
869 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
870 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
871 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
872 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
873 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
874 cagf = intel_gpu_freq(dev_priv,
875 intel_get_cagf(dev_priv, rpstat));
877 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
879 if (INTEL_GEN(dev_priv) >= 11) {
880 pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
881 pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
883 * The equivalent to the PM ISR & IIR cannot be read
884 * without affecting the current state of the system
888 } else if (INTEL_GEN(dev_priv) >= 8) {
889 pm_ier = I915_READ(GEN8_GT_IER(2));
890 pm_imr = I915_READ(GEN8_GT_IMR(2));
891 pm_isr = I915_READ(GEN8_GT_ISR(2));
892 pm_iir = I915_READ(GEN8_GT_IIR(2));
894 pm_ier = I915_READ(GEN6_PMIER);
895 pm_imr = I915_READ(GEN6_PMIMR);
896 pm_isr = I915_READ(GEN6_PMISR);
897 pm_iir = I915_READ(GEN6_PMIIR);
899 pm_mask = I915_READ(GEN6_PMINTRMSK);
901 seq_printf(m, "Video Turbo Mode: %s\n",
902 yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
903 seq_printf(m, "HW control enabled: %s\n",
904 yesno(rpmodectl & GEN6_RP_ENABLE));
905 seq_printf(m, "SW control enabled: %s\n",
906 yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
907 GEN6_RP_MEDIA_SW_MODE));
909 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
910 pm_ier, pm_imr, pm_mask);
911 if (INTEL_GEN(dev_priv) <= 10)
912 seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
914 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
915 rps->pm_intrmsk_mbz);
916 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
917 seq_printf(m, "Render p-state ratio: %d\n",
918 (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
919 seq_printf(m, "Render p-state VID: %d\n",
920 gt_perf_status & 0xff);
921 seq_printf(m, "Render p-state limit: %d\n",
922 rp_state_limits & 0xff);
923 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
924 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
925 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
926 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
927 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
928 seq_printf(m, "CAGF: %dMHz\n", cagf);
929 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
930 rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
931 seq_printf(m, "RP CUR UP: %d (%dus)\n",
932 rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
933 seq_printf(m, "RP PREV UP: %d (%dus)\n",
934 rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
935 seq_printf(m, "Up threshold: %d%%\n",
936 rps->power.up_threshold);
938 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
939 rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
940 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
941 rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
942 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
943 rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
944 seq_printf(m, "Down threshold: %d%%\n",
945 rps->power.down_threshold);
947 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
948 rp_state_cap >> 16) & 0xff;
949 max_freq *= (IS_GEN9_BC(dev_priv) ||
950 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
951 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
952 intel_gpu_freq(dev_priv, max_freq));
954 max_freq = (rp_state_cap & 0xff00) >> 8;
955 max_freq *= (IS_GEN9_BC(dev_priv) ||
956 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
957 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
958 intel_gpu_freq(dev_priv, max_freq));
960 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
961 rp_state_cap >> 0) & 0xff;
962 max_freq *= (IS_GEN9_BC(dev_priv) ||
963 INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
964 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
965 intel_gpu_freq(dev_priv, max_freq));
966 seq_printf(m, "Max overclocked frequency: %dMHz\n",
967 intel_gpu_freq(dev_priv, rps->max_freq));
969 seq_printf(m, "Current freq: %d MHz\n",
970 intel_gpu_freq(dev_priv, rps->cur_freq));
971 seq_printf(m, "Actual freq: %d MHz\n", cagf);
972 seq_printf(m, "Idle freq: %d MHz\n",
973 intel_gpu_freq(dev_priv, rps->idle_freq));
974 seq_printf(m, "Min freq: %d MHz\n",
975 intel_gpu_freq(dev_priv, rps->min_freq));
976 seq_printf(m, "Boost freq: %d MHz\n",
977 intel_gpu_freq(dev_priv, rps->boost_freq));
978 seq_printf(m, "Max freq: %d MHz\n",
979 intel_gpu_freq(dev_priv, rps->max_freq));
981 "efficient (RPe) frequency: %d MHz\n",
982 intel_gpu_freq(dev_priv, rps->efficient_freq));
984 seq_puts(m, "no P-state info available\n");
987 seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
988 seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
989 seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
991 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
995 static void i915_instdone_info(struct drm_i915_private *dev_priv,
997 struct intel_instdone *instdone)
1002 seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1003 instdone->instdone);
1005 if (INTEL_GEN(dev_priv) <= 3)
1008 seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1009 instdone->slice_common);
1011 if (INTEL_GEN(dev_priv) <= 6)
1014 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1015 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1016 slice, subslice, instdone->sampler[slice][subslice]);
1018 for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1019 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1020 slice, subslice, instdone->row[slice][subslice]);
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_gt *gt = &i915->gt;
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
	if (test_bit(I915_WEDGED, &gt->reset.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	if (timer_pending(&gt->hangcheck.work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&gt->hangcheck.work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");
1051 seq_printf(m, "GT active? %s\n", yesno(gt->awake));
1053 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
1054 for_each_engine(engine, i915, id) {
1055 struct intel_instdone instdone;
1057 seq_printf(m, "%s: %d ms ago\n",
1059 jiffies_to_msecs(jiffies -
1060 engine->hangcheck.action_timestamp));
1062 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1063 (long long)engine->hangcheck.acthd,
1064 intel_engine_get_active_head(engine));
1066 intel_engine_get_instdone(engine, &instdone);
1068 seq_puts(m, "\tinstdone read =\n");
1069 i915_instdone_info(i915, m, &instdone);
1071 seq_puts(m, "\tinstdone accu =\n");
1072 i915_instdone_info(i915, m,
1073 &engine->hangcheck.instdone);
1080 static int ironlake_drpc_info(struct seq_file *m)
1082 struct drm_i915_private *i915 = node_to_i915(m->private);
1083 struct intel_uncore *uncore = &i915->uncore;
1084 u32 rgvmodectl, rstdbyctl;
1087 rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
1088 rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
1089 crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
1091 seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1092 seq_printf(m, "Boost freq: %d\n",
1093 (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1094 MEMMODE_BOOST_FREQ_SHIFT);
1095 seq_printf(m, "HW control enabled: %s\n",
1096 yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1097 seq_printf(m, "SW control enabled: %s\n",
1098 yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1099 seq_printf(m, "Gated voltage change: %s\n",
1100 yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1101 seq_printf(m, "Starting frequency: P%d\n",
1102 (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1103 seq_printf(m, "Max P-state: P%d\n",
1104 (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1105 seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1106 seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1107 seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1108 seq_printf(m, "Render standby enabled: %s\n",
1109 yesno(!(rstdbyctl & RCX_SW_EXIT)));
1110 seq_puts(m, "Current RS state: ");
1111 switch (rstdbyctl & RSX_STATUS_MASK) {
1113 seq_puts(m, "on\n");
1115 case RSX_STATUS_RC1:
1116 seq_puts(m, "RC1\n");
1118 case RSX_STATUS_RC1E:
1119 seq_puts(m, "RC1E\n");
1121 case RSX_STATUS_RS1:
1122 seq_puts(m, "RS1\n");
1124 case RSX_STATUS_RS2:
1125 seq_puts(m, "RS2 (RC6)\n");
1127 case RSX_STATUS_RS3:
1128 seq_puts(m, "RC3 (RC6+)\n");
1131 seq_puts(m, "unknown\n");
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
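
/*
 * print_rc6_res() reports an RC6 residency counter both as the raw register
 * value and converted to microseconds via intel_rc6_residency_us().
 */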
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
				   GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1189 static int gen6_drpc_info(struct seq_file *m)
1191 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1192 u32 gt_core_status, rcctl1, rc6vids = 0;
1193 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
1195 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
1196 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
1198 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1199 if (INTEL_GEN(dev_priv) >= 9) {
1200 gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
1201 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
1204 if (INTEL_GEN(dev_priv) <= 7)
1205 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
1208 seq_printf(m, "RC1e Enabled: %s\n",
1209 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
1210 seq_printf(m, "RC6 Enabled: %s\n",
1211 yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
1212 if (INTEL_GEN(dev_priv) >= 9) {
1213 seq_printf(m, "Render Well Gating Enabled: %s\n",
1214 yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
1215 seq_printf(m, "Media Well Gating Enabled: %s\n",
1216 yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
1218 seq_printf(m, "Deep RC6 Enabled: %s\n",
1219 yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
1220 seq_printf(m, "Deepest RC6 Enabled: %s\n",
1221 yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
1222 seq_puts(m, "Current RC state: ");
1223 switch (gt_core_status & GEN6_RCn_MASK) {
1225 if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
1226 seq_puts(m, "Core Power Down\n");
1228 seq_puts(m, "on\n");
1231 seq_puts(m, "RC3\n");
1234 seq_puts(m, "RC6\n");
1237 seq_puts(m, "RC7\n");
1240 seq_puts(m, "Unknown\n");
1244 seq_printf(m, "Core Power Down: %s\n",
1245 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1246 if (INTEL_GEN(dev_priv) >= 9) {
1247 seq_printf(m, "Render Power Well: %s\n",
1248 (gen9_powergate_status &
1249 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
1250 seq_printf(m, "Media Power Well: %s\n",
1251 (gen9_powergate_status &
1252 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
1255 /* Not exactly sure what this is */
1256 print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
1257 GEN6_GT_GFX_RC6_LOCKED);
1258 print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
1259 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
1260 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
1262 if (INTEL_GEN(dev_priv) <= 7) {
1263 seq_printf(m, "RC6 voltage: %dmV\n",
1264 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1265 seq_printf(m, "RC6+ voltage: %dmV\n",
1266 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1267 seq_printf(m, "RC6++ voltage: %dmV\n",
1268 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1271 return i915_forcewake_domains(m, NULL);
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "FB tracking busy bits: 0x%08x\n",
		   dev_priv->fb_tracking.busy_bits);

	seq_printf(m, "FB tracking flip bits: 0x%08x\n",
		   dev_priv->fb_tracking.flip_bits);

	return 0;
}
1305 static int i915_fbc_status(struct seq_file *m, void *unused)
1307 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1308 struct intel_fbc *fbc = &dev_priv->fbc;
1309 intel_wakeref_t wakeref;
1311 if (!HAS_FBC(dev_priv))
1314 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1315 mutex_lock(&fbc->lock);
1317 if (intel_fbc_is_active(dev_priv))
1318 seq_puts(m, "FBC enabled\n");
1320 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1322 if (intel_fbc_is_active(dev_priv)) {
1325 if (INTEL_GEN(dev_priv) >= 8)
1326 mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1327 else if (INTEL_GEN(dev_priv) >= 7)
1328 mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1329 else if (INTEL_GEN(dev_priv) >= 5)
1330 mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1331 else if (IS_G4X(dev_priv))
1332 mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1334 mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1335 FBC_STAT_COMPRESSED);
1337 seq_printf(m, "Compressing: %s\n", yesno(mask));
1340 mutex_unlock(&fbc->lock);
1341 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1346 static int i915_fbc_false_color_get(void *data, u64 *val)
1348 struct drm_i915_private *dev_priv = data;
1350 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1353 *val = dev_priv->fbc.false_color;
1358 static int i915_fbc_false_color_set(void *data, u64 val)
1360 struct drm_i915_private *dev_priv = data;
1363 if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1366 mutex_lock(&dev_priv->fbc.lock);
1368 reg = I915_READ(ILK_DPFC_CONTROL);
1369 dev_priv->fbc.false_color = val;
1371 I915_WRITE(ILK_DPFC_CONTROL, val ?
1372 (reg | FBC_CTL_FALSE_COLOR) :
1373 (reg & ~FBC_CTL_FALSE_COLOR));
1375 mutex_unlock(&dev_priv->fbc.lock);
1379 DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
1380 i915_fbc_false_color_get, i915_fbc_false_color_set,
1383 static int i915_ips_status(struct seq_file *m, void *unused)
1385 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1386 intel_wakeref_t wakeref;
1388 if (!HAS_IPS(dev_priv))
1391 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1393 seq_printf(m, "Enabled by kernel parameter: %s\n",
1394 yesno(i915_modparams.enable_ips));
1396 if (INTEL_GEN(dev_priv) >= 8) {
1397 seq_puts(m, "Currently: unknown\n");
1399 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1400 seq_puts(m, "Currently: enabled\n");
1402 seq_puts(m, "Currently: disabled\n");
1405 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1439 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1441 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1442 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1443 unsigned int max_gpu_freq, min_gpu_freq;
1444 intel_wakeref_t wakeref;
1445 int gpu_freq, ia_freq;
1447 if (!HAS_LLC(dev_priv))
1450 min_gpu_freq = rps->min_freq;
1451 max_gpu_freq = rps->max_freq;
1452 if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1453 /* Convert GT frequency to 50 HZ units */
1454 min_gpu_freq /= GEN9_FREQ_SCALER;
1455 max_gpu_freq /= GEN9_FREQ_SCALER;
1458 seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1460 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1461 for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1463 sandybridge_pcode_read(dev_priv,
1464 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1466 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1467 intel_gpu_freq(dev_priv, (gpu_freq *
1468 (IS_GEN9_BC(dev_priv) ||
1469 INTEL_GEN(dev_priv) >= 10 ?
1470 GEN9_FREQ_SCALER : 1))),
1471 ((ia_freq >> 0) & 0xff) * 100,
1472 ((ia_freq >> 8) & 0xff) * 100);
1474 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1479 static int i915_opregion(struct seq_file *m, void *unused)
1481 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1482 struct drm_device *dev = &dev_priv->drm;
1483 struct intel_opregion *opregion = &dev_priv->opregion;
1486 ret = mutex_lock_interruptible(&dev->struct_mutex);
1490 if (opregion->header)
1491 seq_write(m, opregion->header, OPREGION_SIZE);
1493 mutex_unlock(&dev->struct_mutex);
1499 static int i915_vbt(struct seq_file *m, void *unused)
1501 struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1504 seq_write(m, opregion->vbt, opregion->vbt_size);
1509 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1511 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1512 struct drm_device *dev = &dev_priv->drm;
1513 struct intel_framebuffer *fbdev_fb = NULL;
1514 struct drm_framebuffer *drm_fb;
1517 ret = mutex_lock_interruptible(&dev->struct_mutex);
1521 #ifdef CONFIG_DRM_FBDEV_EMULATION
1522 if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1523 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1525 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1526 fbdev_fb->base.width,
1527 fbdev_fb->base.height,
1528 fbdev_fb->base.format->depth,
1529 fbdev_fb->base.format->cpp[0] * 8,
1530 fbdev_fb->base.modifier,
1531 drm_framebuffer_read_refcount(&fbdev_fb->base));
1532 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1537 mutex_lock(&dev->mode_config.fb_lock);
1538 drm_for_each_fb(drm_fb, dev) {
1539 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1543 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1546 fb->base.format->depth,
1547 fb->base.format->cpp[0] * 8,
1549 drm_framebuffer_read_refcount(&fb->base));
1550 describe_obj(m, intel_fb_obj(&fb->base));
1553 mutex_unlock(&dev->mode_config.fb_lock);
1554 mutex_unlock(&dev->struct_mutex);
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
		   ring->space, ring->head, ring->tail, ring->emit);
}
1565 static int i915_context_status(struct seq_file *m, void *unused)
1567 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1568 struct drm_device *dev = &dev_priv->drm;
1569 struct i915_gem_context *ctx;
1572 ret = mutex_lock_interruptible(&dev->struct_mutex);
1576 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
1577 struct i915_gem_engines_iter it;
1578 struct intel_context *ce;
1580 seq_puts(m, "HW context ");
1581 if (!list_empty(&ctx->hw_id_link))
1582 seq_printf(m, "%x [pin %u]", ctx->hw_id,
1583 atomic_read(&ctx->hw_id_pin_count));
1585 struct task_struct *task;
1587 task = get_pid_task(ctx->pid, PIDTYPE_PID);
1589 seq_printf(m, "(%s [%d]) ",
1590 task->comm, task->pid);
1591 put_task_struct(task);
1593 } else if (IS_ERR(ctx->file_priv)) {
1594 seq_puts(m, "(deleted) ");
1596 seq_puts(m, "(kernel) ");
1599 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
1602 for_each_gem_engine(ce,
1603 i915_gem_context_lock_engines(ctx), it) {
1604 intel_context_lock_pinned(ce);
1605 if (intel_context_is_pinned(ce)) {
1606 seq_printf(m, "%s: ", ce->engine->name);
1608 describe_obj(m, ce->state->obj);
1609 describe_ctx_ring(m, ce->ring);
1612 intel_context_unlock_pinned(ce);
1614 i915_gem_context_unlock_engines(ctx);
1619 mutex_unlock(&dev->struct_mutex);
static const char *swizzle_string(unsigned swizzle)
{
	switch (swizzle) {
	case I915_BIT_6_SWIZZLE_NONE:
		return "none";
	case I915_BIT_6_SWIZZLE_9:
		return "bit9";
	case I915_BIT_6_SWIZZLE_9_10:
		return "bit9/bit10";
	case I915_BIT_6_SWIZZLE_9_11:
		return "bit9/bit11";
	case I915_BIT_6_SWIZZLE_9_10_11:
		return "bit9/bit10/bit11";
	case I915_BIT_6_SWIZZLE_9_17:
		return "bit9/bit17";
	case I915_BIT_6_SWIZZLE_9_10_17:
		return "bit9/bit10/bit17";
	case I915_BIT_6_SWIZZLE_UNKNOWN:
	default:
		return "unknown";
	}
}
1648 static int i915_swizzle_info(struct seq_file *m, void *data)
1650 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651 struct intel_uncore *uncore = &dev_priv->uncore;
1652 intel_wakeref_t wakeref;
1654 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1656 seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
1657 swizzle_string(dev_priv->mm.bit_6_swizzle_x));
1658 seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
1659 swizzle_string(dev_priv->mm.bit_6_swizzle_y));
1661 if (IS_GEN_RANGE(dev_priv, 3, 4)) {
1662 seq_printf(m, "DDC = 0x%08x\n",
1663 intel_uncore_read(uncore, DCC));
1664 seq_printf(m, "DDC2 = 0x%08x\n",
1665 intel_uncore_read(uncore, DCC2));
1666 seq_printf(m, "C0DRB3 = 0x%04x\n",
1667 intel_uncore_read16(uncore, C0DRB3));
1668 seq_printf(m, "C1DRB3 = 0x%04x\n",
1669 intel_uncore_read16(uncore, C1DRB3));
1670 } else if (INTEL_GEN(dev_priv) >= 6) {
1671 seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
1672 intel_uncore_read(uncore, MAD_DIMM_C0));
1673 seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
1674 intel_uncore_read(uncore, MAD_DIMM_C1));
1675 seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
1676 intel_uncore_read(uncore, MAD_DIMM_C2));
1677 seq_printf(m, "TILECTL = 0x%08x\n",
1678 intel_uncore_read(uncore, TILECTL));
1679 if (INTEL_GEN(dev_priv) >= 8)
1680 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1681 intel_uncore_read(uncore, GAMTARBMODE));
1683 seq_printf(m, "ARB_MODE = 0x%08x\n",
1684 intel_uncore_read(uncore, ARB_MODE));
1685 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1686 intel_uncore_read(uncore, DISP_ARB_CTL));
1689 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
1690 seq_puts(m, "L-shaped memory detected\n");
1692 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
static const char *rps_power_to_str(unsigned int power)
{
	static const char * const strings[] = {
		[LOW_POWER] = "low power",
		[BETWEEN] = "mixed",
		[HIGH_POWER] = "high power",
	};

	if (power >= ARRAY_SIZE(strings) || !strings[power])
		return "unknown";

	return strings[power];
}
1711 static int i915_rps_boost_info(struct seq_file *m, void *data)
1713 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1714 struct intel_rps *rps = &dev_priv->gt_pm.rps;
1715 u32 act_freq = rps->cur_freq;
1716 intel_wakeref_t wakeref;
1718 with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
1719 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1720 vlv_punit_get(dev_priv);
1721 act_freq = vlv_punit_read(dev_priv,
1722 PUNIT_REG_GPU_FREQ_STS);
1723 vlv_punit_put(dev_priv);
1724 act_freq = (act_freq >> 8) & 0xff;
1726 act_freq = intel_get_cagf(dev_priv,
1727 I915_READ(GEN6_RPSTAT1));
1731 seq_printf(m, "RPS enabled? %d\n", rps->enabled);
1732 seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
1733 seq_printf(m, "Boosts outstanding? %d\n",
1734 atomic_read(&rps->num_waiters));
1735 seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
1736 seq_printf(m, "Frequency requested %d, actual %d\n",
1737 intel_gpu_freq(dev_priv, rps->cur_freq),
1738 intel_gpu_freq(dev_priv, act_freq));
1739 seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n",
1740 intel_gpu_freq(dev_priv, rps->min_freq),
1741 intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
1742 intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
1743 intel_gpu_freq(dev_priv, rps->max_freq));
1744 seq_printf(m, " idle:%d, efficient:%d, boost:%d\n",
1745 intel_gpu_freq(dev_priv, rps->idle_freq),
1746 intel_gpu_freq(dev_priv, rps->efficient_freq),
1747 intel_gpu_freq(dev_priv, rps->boost_freq));
1749 seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
1751 if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
1753 u32 rpdown, rpdownei;
1755 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1756 rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
1757 rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
1758 rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
1759 rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
1760 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1762 seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
1763 rps_power_to_str(rps->power.mode));
1764 seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
1765 rpup && rpupei ? 100 * rpup / rpupei : 0,
1766 rps->power.up_threshold);
1767 seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
1768 rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
1769 rps->power.down_threshold);
1771 seq_puts(m, "\nRPS Autotuning inactive\n");
static int i915_llc(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const bool edram = INTEL_GEN(dev_priv) > 8;

	seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
	seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
		   dev_priv->edram_size_mb);

	return 0;
}
1789 static int i915_huc_load_status_info(struct seq_file *m, void *data)
1791 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1792 intel_wakeref_t wakeref;
1793 struct drm_printer p;
1795 if (!HAS_GT_UC(dev_priv))
1798 p = drm_seq_file_printer(m);
1799 intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);
1801 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
1802 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
1807 static int i915_guc_load_status_info(struct seq_file *m, void *data)
1809 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1810 intel_wakeref_t wakeref;
1811 struct drm_printer p;
1813 if (!HAS_GT_UC(dev_priv))
1816 p = drm_seq_file_printer(m);
1817 intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);
1819 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1820 u32 tmp = I915_READ(GUC_STATUS);
1823 seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
1824 seq_printf(m, "\tBootrom status = 0x%x\n",
1825 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
1826 seq_printf(m, "\tuKernel status = 0x%x\n",
1827 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
1828 seq_printf(m, "\tMIA Core status = 0x%x\n",
1829 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
1830 seq_puts(m, "\nScratch registers:\n");
1831 for (i = 0; i < 16; i++) {
1832 seq_printf(m, "\t%2d: \t0x%x\n",
1833 i, I915_READ(SOFT_SCRATCH(i)));
1841 stringify_guc_log_type(enum guc_log_buffer_type type)
1844 case GUC_ISR_LOG_BUFFER:
1846 case GUC_DPC_LOG_BUFFER:
1848 case GUC_CRASH_DUMP_LOG_BUFFER:
1857 static void i915_guc_log_info(struct seq_file *m,
1858 struct drm_i915_private *dev_priv)
1860 struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1861 enum guc_log_buffer_type type;
1863 if (!intel_guc_log_relay_enabled(log)) {
1864 seq_puts(m, "GuC log relay disabled\n");
1868 seq_puts(m, "GuC logging stats:\n");
1870 seq_printf(m, "\tRelay full count: %u\n",
1871 log->relay.full_count);
1873 for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1874 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1875 stringify_guc_log_type(type),
1876 log->stats[type].flush,
1877 log->stats[type].sampled_overflow);
1881 static int i915_guc_info(struct seq_file *m, void *data)
1883 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1884 const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1885 struct intel_guc_client *client = guc->execbuf_client;
1887 if (!USES_GUC(dev_priv))
1890 i915_guc_log_info(m, dev_priv);
1892 if (!USES_GUC_SUBMISSION(dev_priv))
1895 GEM_BUG_ON(!guc->execbuf_client);
1897 seq_printf(m, "\nDoorbell map:\n");
1898 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1899 seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1901 seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
1902 seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1905 client->proc_desc_offset);
1906 seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1907 client->doorbell_id, client->doorbell_offset);
1908 /* Add more as required ... */
1913 static int i915_guc_stage_pool(struct seq_file *m, void *data)
1915 struct drm_i915_private *dev_priv = node_to_i915(m->private);
1916 const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1917 struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
1920 if (!USES_GUC_SUBMISSION(dev_priv))
1923 for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
1924 struct intel_engine_cs *engine;
1926 if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
1929 seq_printf(m, "GuC stage descriptor %u:\n", index);
1930 seq_printf(m, "\tIndex: %u\n", desc->stage_id);
1931 seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
1932 seq_printf(m, "\tPriority: %d\n", desc->priority);
1933 seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
1934 seq_printf(m, "\tEngines used: 0x%x\n",
1935 desc->engines_used);
1936 seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
1937 desc->db_trigger_phy,
1938 desc->db_trigger_cpu,
1939 desc->db_trigger_uk);
1940 seq_printf(m, "\tProcess descriptor: 0x%x\n",
1941 desc->process_desc);
1942 seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
1943 desc->wq_addr, desc->wq_size);
1946 for_each_uabi_engine(engine, dev_priv) {
1947 u32 guc_engine_id = engine->guc_id;
1948 struct guc_execlist_context *lrc =
1949 &desc->lrc[guc_engine_id];
1951 seq_printf(m, "\t%s LRC:\n", engine->name);
1952 seq_printf(m, "\t\tContext desc: 0x%x\n",
1954 seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
1955 seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
1956 seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
1957 seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
1965 static int i915_guc_log_dump(struct seq_file *m, void *data)
1967 struct drm_info_node *node = m->private;
1968 struct drm_i915_private *dev_priv = node_to_i915(node);
1969 bool dump_load_err = !!node->info_ent->data;
1970 struct drm_i915_gem_object *obj = NULL;
1974 if (!HAS_GT_UC(dev_priv))
1978 obj = dev_priv->gt.uc.load_err_log;
1979 else if (dev_priv->gt.uc.guc.log.vma)
1980 obj = dev_priv->gt.uc.guc.log.vma->obj;
1985 log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1987 DRM_DEBUG("Failed to pin object\n");
1988 seq_puts(m, "(log data unaccessible)\n");
1989 return PTR_ERR(log);
1992 for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1993 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1994 *(log + i), *(log + i + 1),
1995 *(log + i + 2), *(log + i + 3));
1999 i915_gem_object_unpin_map(obj);
2004 static int i915_guc_log_level_get(void *data, u64 *val)
2006 struct drm_i915_private *dev_priv = data;
2008 if (!USES_GUC(dev_priv))
2011 *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
2016 static int i915_guc_log_level_set(void *data, u64 val)
2018 struct drm_i915_private *dev_priv = data;
2020 if (!USES_GUC(dev_priv))
2023 return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
2026 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2027 i915_guc_log_level_get, i915_guc_log_level_set,
2030 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2032 struct drm_i915_private *i915 = inode->i_private;
2033 struct intel_guc *guc = &i915->gt.uc.guc;
2034 struct intel_guc_log *log = &guc->log;
2036 if (!intel_guc_is_running(guc))
2039 file->private_data = log;
2041 return intel_guc_log_relay_open(log);
2045 i915_guc_log_relay_write(struct file *filp,
2046 const char __user *ubuf,
2050 struct intel_guc_log *log = filp->private_data;
2052 intel_guc_log_relay_flush(log);
2056 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2058 struct drm_i915_private *i915 = inode->i_private;
2059 struct intel_guc *guc = &i915->gt.uc.guc;
2061 intel_guc_log_relay_close(&guc->log);
2065 static const struct file_operations i915_guc_log_relay_fops = {
2066 .owner = THIS_MODULE,
2067 .open = i915_guc_log_relay_open,
2068 .write = i915_guc_log_relay_write,
2069 .release = i915_guc_log_relay_release,
2072 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2075 static const char * const sink_status[] = {
2077 "transition to active, capture and display",
2078 "active, display from RFB",
2079 "active, capture and display on sink device timings",
2080 "transition to inactive, capture and display, timing re-sync",
2083 "sink internal error",
2085 struct drm_connector *connector = m->private;
2086 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2087 struct intel_dp *intel_dp =
2088 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2091 if (!CAN_PSR(dev_priv)) {
2092 seq_puts(m, "PSR Unsupported\n");
2096 if (connector->status != connector_status_connected)
2099 ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2102 const char *str = "unknown";
2104 val &= DP_PSR_SINK_STATE_MASK;
2105 if (val < ARRAY_SIZE(sink_status))
2106 str = sink_status[val];
2107 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2114 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2117 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2119 u32 val, status_val;
2120 const char *status = "unknown";
2122 if (dev_priv->psr.psr2_enabled) {
2123 static const char * const live_status[] = {
2136 val = I915_READ(EDP_PSR2_STATUS);
2137 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2138 EDP_PSR2_STATUS_STATE_SHIFT;
2139 if (status_val < ARRAY_SIZE(live_status))
2140 status = live_status[status_val];
2142 static const char * const live_status[] = {
2152 val = I915_READ(EDP_PSR_STATUS);
2153 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2154 EDP_PSR_STATUS_STATE_SHIFT;
2155 if (status_val < ARRAY_SIZE(live_status))
2156 status = live_status[status_val];
2159 seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2162 static int i915_edp_psr_status(struct seq_file *m, void *data)
2164 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2165 struct i915_psr *psr = &dev_priv->psr;
2166 intel_wakeref_t wakeref;
2171 if (!HAS_PSR(dev_priv))
2174 seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
2176 seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
2179 if (!psr->sink_support)
2182 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2183 mutex_lock(&psr->lock);
2186 status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
2188 status = "disabled";
2189 seq_printf(m, "PSR mode: %s\n", status);
2194 if (psr->psr2_enabled) {
2195 val = I915_READ(EDP_PSR2_CTL);
2196 enabled = val & EDP_PSR2_ENABLE;
2198 val = I915_READ(EDP_PSR_CTL);
2199 enabled = val & EDP_PSR_ENABLE;
2201 seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
2202 enableddisabled(enabled), val);
2203 psr_source_status(dev_priv, m);
2204 seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
2205 psr->busy_frontbuffer_bits);
2208 * SKL+ Perf counter is reset to 0 every time DC state is entered
2210 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2211 val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
2212 seq_printf(m, "Performance counter: %u\n", val);
2215 if (psr->debug & I915_PSR_DEBUG_IRQ) {
2216 seq_printf(m, "Last attempted entry at: %lld\n",
2217 psr->last_entry_attempt);
2218 seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
2221 if (psr->psr2_enabled) {
2222 u32 su_frames_val[3];
2226 * Reading all 3 registers beforehand to minimize crossing a
2227 * frame boundary between register reads
2229 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
2230 su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));
2232 seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
2234 for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
2237 su_blocks = su_frames_val[frame / 3] &
2238 PSR2_SU_STATUS_MASK(frame);
2239 su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
2240 seq_printf(m, "%d\t%d\n", frame, su_blocks);
2245 mutex_unlock(&psr->lock);
2246 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2252 i915_edp_psr_debug_set(void *data, u64 val)
2254 struct drm_i915_private *dev_priv = data;
2255 intel_wakeref_t wakeref;
2258 if (!CAN_PSR(dev_priv))
2261 DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2263 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2265 ret = intel_psr_debug_set(dev_priv, val);
2267 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2273 i915_edp_psr_debug_get(void *data, u64 *val)
2275 struct drm_i915_private *dev_priv = data;
2277 if (!CAN_PSR(dev_priv))
2280 *val = READ_ONCE(dev_priv->psr.debug);
2284 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2285 i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2288 static int i915_energy_uJ(struct seq_file *m, void *data)
2290 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2291 unsigned long long power;
2292 intel_wakeref_t wakeref;
2295 if (INTEL_GEN(dev_priv) < 6)
2298 if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
2301 units = (power & 0x1f00) >> 8;
2302 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
2303 power = I915_READ(MCH_SECP_NRG_STTS);
2305 power = (1000000 * power) >> units; /* convert to uJ */
2306 seq_printf(m, "%llu", power);
2311 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2313 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2314 struct pci_dev *pdev = dev_priv->drm.pdev;
2316 if (!HAS_RUNTIME_PM(dev_priv))
2317 seq_puts(m, "Runtime power management not supported\n");
2319 seq_printf(m, "Runtime power status: %s\n",
2320 enableddisabled(!dev_priv->power_domains.wakeref));
2322 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2323 seq_printf(m, "IRQs disabled: %s\n",
2324 yesno(!intel_irqs_enabled(dev_priv)));
2326 seq_printf(m, "Usage count: %d\n",
2327 atomic_read(&dev_priv->drm.dev->power.usage_count));
2329 seq_puts(m, "Device Power Management (CONFIG_PM) disabled\n");
2331 seq_printf(m, "PCI device power state: %s [%d]\n",
2332 pci_power_name(pdev->current_state),
2333 pdev->current_state);
2335 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2336 struct drm_printer p = drm_seq_file_printer(m);
2338 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2344 static int i915_power_domain_info(struct seq_file *m, void *unused)
2346 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2347 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2350 mutex_lock(&power_domains->lock);
2352 seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2353 for (i = 0; i < power_domains->power_well_count; i++) {
2354 struct i915_power_well *power_well;
2355 enum intel_display_power_domain power_domain;
2357 power_well = &power_domains->power_wells[i];
2358 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2361 for_each_power_domain(power_domain, power_well->desc->domains)
2362 seq_printf(m, " %-23s %d\n",
2363 intel_display_power_domain_str(dev_priv,
2365 power_domains->domain_use_count[power_domain]);
2368 mutex_unlock(&power_domains->lock);
2373 static int i915_dmc_info(struct seq_file *m, void *unused)
2375 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2376 intel_wakeref_t wakeref;
2377 struct intel_csr *csr;
2378 i915_reg_t dc5_reg, dc6_reg = {};
2380 if (!HAS_CSR(dev_priv))
2383 csr = &dev_priv->csr;
2385 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2387 seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
2388 seq_printf(m, "path: %s\n", csr->fw_path);
2390 if (!csr->dmc_payload)
2393 seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
2394 CSR_VERSION_MINOR(csr->version));
2396 if (INTEL_GEN(dev_priv) >= 12) {
2397 dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
2398 dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
2400 dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
2401 SKL_CSR_DC3_DC5_COUNT;
2402 if (!IS_GEN9_LP(dev_priv))
2403 dc6_reg = SKL_CSR_DC5_DC6_COUNT;
2406 seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
2408 seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
2411 seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
2412 seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
2413 seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
2415 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2420 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2421 struct drm_display_mode *mode)
2425 for (i = 0; i < tabs; i++)
2428 seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2431 static void intel_encoder_info(struct seq_file *m,
2432 struct intel_crtc *intel_crtc,
2433 struct intel_encoder *intel_encoder)
2435 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2436 struct drm_device *dev = &dev_priv->drm;
2437 struct drm_crtc *crtc = &intel_crtc->base;
2438 struct intel_connector *intel_connector;
2439 struct drm_encoder *encoder;
2441 encoder = &intel_encoder->base;
2442 seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2443 encoder->base.id, encoder->name);
2444 for_each_connector_on_encoder(dev, encoder, intel_connector) {
2445 struct drm_connector *connector = &intel_connector->base;
2446 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2449 drm_get_connector_status_name(connector->status));
2450 if (connector->status == connector_status_connected) {
2451 struct drm_display_mode *mode = &crtc->mode;
2452 seq_puts(m, ", mode:\n");
2453 intel_seq_print_mode(m, 2, mode);
2460 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2462 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2463 struct drm_device *dev = &dev_priv->drm;
2464 struct drm_crtc *crtc = &intel_crtc->base;
2465 struct intel_encoder *intel_encoder;
2466 struct drm_plane_state *plane_state = crtc->primary->state;
2467 struct drm_framebuffer *fb = plane_state->fb;
2470 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2471 fb->base.id, plane_state->src_x >> 16,
2472 plane_state->src_y >> 16, fb->width, fb->height);
2474 seq_puts(m, "\tprimary plane disabled\n");
2475 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2476 intel_encoder_info(m, intel_crtc, intel_encoder);
2479 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2481 struct drm_display_mode *mode = panel->fixed_mode;
2483 seq_puts(m, "\tfixed mode:\n");
2484 intel_seq_print_mode(m, 2, mode);
2487 static void intel_hdcp_info(struct seq_file *m,
2488 struct intel_connector *intel_connector)
2490 bool hdcp_cap, hdcp2_cap;
2492 hdcp_cap = intel_hdcp_capable(intel_connector);
2493 hdcp2_cap = intel_hdcp2_capable(intel_connector);
2496 seq_puts(m, "HDCP1.4 ");
2498 seq_puts(m, "HDCP2.2 ");
2500 if (!hdcp_cap && !hdcp2_cap)
2501 seq_puts(m, "None");
2506 static void intel_dp_info(struct seq_file *m,
2507 struct intel_connector *intel_connector)
2509 struct intel_encoder *intel_encoder = intel_connector->encoder;
2510 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2512 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2513 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2514 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2515 intel_panel_info(m, &intel_connector->panel);
2517 drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2519 if (intel_connector->hdcp.shim) {
2520 seq_puts(m, "\tHDCP version: ");
2521 intel_hdcp_info(m, intel_connector);
2525 static void intel_dp_mst_info(struct seq_file *m,
2526 struct intel_connector *intel_connector)
2528 struct intel_encoder *intel_encoder = intel_connector->encoder;
2529 struct intel_dp_mst_encoder *intel_mst =
2530 enc_to_mst(&intel_encoder->base);
2531 struct intel_digital_port *intel_dig_port = intel_mst->primary;
2532 struct intel_dp *intel_dp = &intel_dig_port->dp;
2533 bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2534 intel_connector->port);
2536 seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2539 static void intel_hdmi_info(struct seq_file *m,
2540 struct intel_connector *intel_connector)
2542 struct intel_encoder *intel_encoder = intel_connector->encoder;
2543 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2545 seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2546 if (intel_connector->hdcp.shim) {
2547 seq_puts(m, "\tHDCP version: ");
2548 intel_hdcp_info(m, intel_connector);
2552 static void intel_lvds_info(struct seq_file *m,
2553 struct intel_connector *intel_connector)
2555 intel_panel_info(m, &intel_connector->panel);
2558 static void intel_connector_info(struct seq_file *m,
2559 struct drm_connector *connector)
2561 struct intel_connector *intel_connector = to_intel_connector(connector);
2562 struct intel_encoder *intel_encoder = intel_connector->encoder;
2563 struct drm_display_mode *mode;
2565 seq_printf(m, "connector %d: type %s, status: %s\n",
2566 connector->base.id, connector->name,
2567 drm_get_connector_status_name(connector->status));
2569 if (connector->status == connector_status_disconnected)
2572 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2573 connector->display_info.width_mm,
2574 connector->display_info.height_mm);
2575 seq_printf(m, "\tsubpixel order: %s\n",
2576 drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2577 seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2582 switch (connector->connector_type) {
2583 case DRM_MODE_CONNECTOR_DisplayPort:
2584 case DRM_MODE_CONNECTOR_eDP:
2585 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2586 intel_dp_mst_info(m, intel_connector);
2588 intel_dp_info(m, intel_connector);
2590 case DRM_MODE_CONNECTOR_LVDS:
2591 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2592 intel_lvds_info(m, intel_connector);
2594 case DRM_MODE_CONNECTOR_HDMIA:
2595 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2596 intel_encoder->type == INTEL_OUTPUT_DDI)
2597 intel_hdmi_info(m, intel_connector);
2603 seq_puts(m, "\tmodes:\n");
2604 list_for_each_entry(mode, &connector->modes, head)
2605 intel_seq_print_mode(m, 2, mode);
2608 static const char *plane_type(enum drm_plane_type type)
2611 case DRM_PLANE_TYPE_OVERLAY:
2613 case DRM_PLANE_TYPE_PRIMARY:
2615 case DRM_PLANE_TYPE_CURSOR:
2618 * Deliberately omitting default: to generate compiler warnings
2619 * when a new drm_plane_type gets added.
2626 static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
2629 * According to the documentation only one DRM_MODE_ROTATE_ value is allowed,
2630 * but print them all so any misuse of these bits is visible
2632 snprintf(buf, bufsize,
2633 "%s%s%s%s%s%s(0x%08x)",
2634 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
2635 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
2636 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
2637 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
2638 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
2639 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
2643 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2645 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2646 struct drm_device *dev = &dev_priv->drm;
2647 struct intel_plane *intel_plane;
2649 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2650 struct drm_plane_state *state;
2651 struct drm_plane *plane = &intel_plane->base;
2652 struct drm_format_name_buf format_name;
2655 if (!plane->state) {
2656 seq_puts(m, "plane->state is NULL!\n");
2660 state = plane->state;
2663 drm_get_format_name(state->fb->format->format,
2666 sprintf(format_name.str, "N/A");
2669 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2671 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
2673 plane_type(intel_plane->base.type),
2674 state->crtc_x, state->crtc_y,
2675 state->crtc_w, state->crtc_h,
2676 (state->src_x >> 16),
2677 ((state->src_x & 0xffff) * 15625) >> 10,
2678 (state->src_y >> 16),
2679 ((state->src_y & 0xffff) * 15625) >> 10,
2680 (state->src_w >> 16),
2681 ((state->src_w & 0xffff) * 15625) >> 10,
2682 (state->src_h >> 16),
2683 ((state->src_h & 0xffff) * 15625) >> 10,
2689 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2691 struct intel_crtc_state *pipe_config;
2692 int num_scalers = intel_crtc->num_scalers;
2695 pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2697 /* Not all platforms have a scaler */
2699 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2701 pipe_config->scaler_state.scaler_users,
2702 pipe_config->scaler_state.scaler_id);
2704 for (i = 0; i < num_scalers; i++) {
2705 struct intel_scaler *sc =
2706 &pipe_config->scaler_state.scalers[i];
2708 seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
2709 i, yesno(sc->in_use), sc->mode);
2713 seq_puts(m, "\tNo scalers available on this platform\n");
2717 static int i915_display_info(struct seq_file *m, void *unused)
2719 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2720 struct drm_device *dev = &dev_priv->drm;
2721 struct intel_crtc *crtc;
2722 struct drm_connector *connector;
2723 struct drm_connector_list_iter conn_iter;
2724 intel_wakeref_t wakeref;
2726 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2728 seq_puts(m, "CRTC info\n");
2729 seq_puts(m, "---------\n");
2730 for_each_intel_crtc(dev, crtc) {
2731 struct intel_crtc_state *pipe_config;
2733 drm_modeset_lock(&crtc->base.mutex, NULL);
2734 pipe_config = to_intel_crtc_state(crtc->base.state);
2736 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2737 crtc->base.base.id, pipe_name(crtc->pipe),
2738 yesno(pipe_config->base.active),
2739 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2740 yesno(pipe_config->dither), pipe_config->pipe_bpp);
2742 if (pipe_config->base.active) {
2743 struct intel_plane *cursor =
2744 to_intel_plane(crtc->base.cursor);
2746 intel_crtc_info(m, crtc);
2748 seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2749 yesno(cursor->base.state->visible),
2750 cursor->base.state->crtc_x,
2751 cursor->base.state->crtc_y,
2752 cursor->base.state->crtc_w,
2753 cursor->base.state->crtc_h,
2754 cursor->cursor.base);
2755 intel_scaler_info(m, crtc);
2756 intel_plane_info(m, crtc);
2759 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
2760 yesno(!crtc->cpu_fifo_underrun_disabled),
2761 yesno(!crtc->pch_fifo_underrun_disabled));
2762 drm_modeset_unlock(&crtc->base.mutex);
2765 seq_putc(m, '\n');
2766 seq_puts(m, "Connector info\n");
2767 seq_puts(m, "--------------\n");
2768 mutex_lock(&dev->mode_config.mutex);
2769 drm_connector_list_iter_begin(dev, &conn_iter);
2770 drm_for_each_connector_iter(connector, &conn_iter)
2771 intel_connector_info(m, connector);
2772 drm_connector_list_iter_end(&conn_iter);
2773 mutex_unlock(&dev->mode_config.mutex);
2775 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2780 static int i915_engine_info(struct seq_file *m, void *unused)
2782 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2783 struct intel_engine_cs *engine;
2784 intel_wakeref_t wakeref;
2785 struct drm_printer p;
2787 wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2789 seq_printf(m, "GT awake? %s [%d]\n",
2790 yesno(dev_priv->gt.awake),
2791 atomic_read(&dev_priv->gt.wakeref.count));
2792 seq_printf(m, "CS timestamp frequency: %u kHz\n",
2793 RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2795 p = drm_seq_file_printer(m);
2796 for_each_uabi_engine(engine, dev_priv)
2797 intel_engine_dump(engine, &p, "%s\n", engine->name);
2799 intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2804 static int i915_rcs_topology(struct seq_file *m, void *unused)
2806 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2807 struct drm_printer p = drm_seq_file_printer(m);
2809 intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2814 static int i915_shrinker_info(struct seq_file *m, void *unused)
2816 struct drm_i915_private *i915 = node_to_i915(m->private);
2818 seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
2819 seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
2824 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2826 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2827 struct drm_device *dev = &dev_priv->drm;
2830 drm_modeset_lock_all(dev);
2831 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2832 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2834 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2836 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2837 pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2838 seq_puts(m, " tracked hardware state:\n");
2839 seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
2840 seq_printf(m, " dpll_md: 0x%08x\n",
2841 pll->state.hw_state.dpll_md);
2842 seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
2843 seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
2844 seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
2845 seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
2846 seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
2847 seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
2848 pll->state.hw_state.mg_refclkin_ctl);
2849 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2850 pll->state.hw_state.mg_clktop2_coreclkctl1);
2851 seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
2852 pll->state.hw_state.mg_clktop2_hsclkctl);
2853 seq_printf(m, " mg_pll_div0: 0x%08x\n",
2854 pll->state.hw_state.mg_pll_div0);
2855 seq_printf(m, " mg_pll_div1: 0x%08x\n",
2856 pll->state.hw_state.mg_pll_div1);
2857 seq_printf(m, " mg_pll_lf: 0x%08x\n",
2858 pll->state.hw_state.mg_pll_lf);
2859 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2860 pll->state.hw_state.mg_pll_frac_lock);
2861 seq_printf(m, " mg_pll_ssc: 0x%08x\n",
2862 pll->state.hw_state.mg_pll_ssc);
2863 seq_printf(m, " mg_pll_bias: 0x%08x\n",
2864 pll->state.hw_state.mg_pll_bias);
2865 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2866 pll->state.hw_state.mg_pll_tdc_coldst_bias);
2868 drm_modeset_unlock_all(dev);
2873 static int i915_wa_registers(struct seq_file *m, void *unused)
2875 struct drm_i915_private *i915 = node_to_i915(m->private);
2876 struct intel_engine_cs *engine;
2878 for_each_uabi_engine(engine, i915) {
2879 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2880 const struct i915_wa *wa;
2887 seq_printf(m, "%s: Workarounds applied: %u\n",
2888 engine->name, count);
2890 for (wa = wal->list; count--; wa++)
2891 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2892 i915_mmio_reg_offset(wa->reg),
2895 seq_putc(m, '\n');
2901 static int i915_ipc_status_show(struct seq_file *m, void *data)
2903 struct drm_i915_private *dev_priv = m->private;
2905 seq_printf(m, "Isochronous Priority Control: %s\n",
2906 yesno(dev_priv->ipc_enabled));
2910 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2912 struct drm_i915_private *dev_priv = inode->i_private;
2914 if (!HAS_IPC(dev_priv))
2917 return single_open(file, i915_ipc_status_show, dev_priv);
2920 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
2921 size_t len, loff_t *offp)
2923 struct seq_file *m = file->private_data;
2924 struct drm_i915_private *dev_priv = m->private;
2925 intel_wakeref_t wakeref;
2929 ret = kstrtobool_from_user(ubuf, len, &enable);
2933 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
2934 if (!dev_priv->ipc_enabled && enable)
2935 DRM_INFO("Enabling IPC: watermarks will be correct only after the next commit\n");
2936 dev_priv->wm.distrust_bios_wm = true;
2937 dev_priv->ipc_enabled = enable;
2938 intel_enable_ipc(dev_priv);
2944 static const struct file_operations i915_ipc_status_fops = {
2945 .owner = THIS_MODULE,
2946 .open = i915_ipc_status_open,
2948 .llseek = seq_lseek,
2949 .release = single_release,
2950 .write = i915_ipc_status_write
2953 static int i915_ddb_info(struct seq_file *m, void *unused)
2955 struct drm_i915_private *dev_priv = node_to_i915(m->private);
2956 struct drm_device *dev = &dev_priv->drm;
2957 struct skl_ddb_entry *entry;
2958 struct intel_crtc *crtc;
2960 if (INTEL_GEN(dev_priv) < 9)
2963 drm_modeset_lock_all(dev);
2965 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2967 for_each_intel_crtc(&dev_priv->drm, crtc) {
2968 struct intel_crtc_state *crtc_state =
2969 to_intel_crtc_state(crtc->base.state);
2970 enum pipe pipe = crtc->pipe;
2971 enum plane_id plane_id;
2973 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2975 for_each_plane_id_on_crtc(crtc, plane_id) {
2976 entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
2977 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
2978 entry->start, entry->end,
2979 skl_ddb_entry_size(entry));
2982 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
2983 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
2984 entry->end, skl_ddb_entry_size(entry));
2987 drm_modeset_unlock_all(dev);
2992 static void drrs_status_per_crtc(struct seq_file *m,
2993 struct drm_device *dev,
2994 struct intel_crtc *intel_crtc)
2996 struct drm_i915_private *dev_priv = to_i915(dev);
2997 struct i915_drrs *drrs = &dev_priv->drrs;
2999 struct drm_connector *connector;
3000 struct drm_connector_list_iter conn_iter;
3002 drm_connector_list_iter_begin(dev, &conn_iter);
3003 drm_for_each_connector_iter(connector, &conn_iter) {
3004 if (connector->state->crtc != &intel_crtc->base)
3007 seq_printf(m, "%s:\n", connector->name);
3009 drm_connector_list_iter_end(&conn_iter);
3011 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
3012 seq_puts(m, "\tVBT: DRRS_type: Static");
3013 else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
3014 seq_puts(m, "\tVBT: DRRS_type: Seamless");
3015 else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
3016 seq_puts(m, "\tVBT: DRRS_type: None");
3018 seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
3020 seq_puts(m, "\n\n");
3022 if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
3023 struct intel_panel *panel;
3025 mutex_lock(&drrs->mutex);
3026 /* DRRS Supported */
3027 seq_puts(m, "\tDRRS Supported: Yes\n");
3029 /* disable_drrs() will make drrs->dp NULL */
3031 seq_puts(m, "Idleness DRRS: Disabled\n");
3032 if (dev_priv->psr.enabled)
3034 "\tAs PSR is enabled, DRRS is not enabled\n");
3035 mutex_unlock(&drrs->mutex);
3039 panel = &drrs->dp->attached_connector->panel;
3040 seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
3041 drrs->busy_frontbuffer_bits);
3043 seq_puts(m, "\n\t\t");
3044 if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
3045 seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
3046 vrefresh = panel->fixed_mode->vrefresh;
3047 } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
3048 seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
3049 vrefresh = panel->downclock_mode->vrefresh;
3051 seq_printf(m, "DRRS_State: Unknown(%d)\n",
3052 drrs->refresh_rate_type);
3053 mutex_unlock(&drrs->mutex);
3056 seq_printf(m, "\t\tVrefresh: %d", vrefresh);
3058 seq_puts(m, "\n\t\t");
3059 mutex_unlock(&drrs->mutex);
3061 /* DRRS not supported. Print the VBT parameter */
3062 seq_puts(m, "\tDRRS Supported: No");
3067 static int i915_drrs_status(struct seq_file *m, void *unused)
3069 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3070 struct drm_device *dev = &dev_priv->drm;
3071 struct intel_crtc *intel_crtc;
3072 int active_crtc_cnt = 0;
3074 drm_modeset_lock_all(dev);
3075 for_each_intel_crtc(dev, intel_crtc) {
3076 if (intel_crtc->base.state->active) {
3078 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3080 drrs_status_per_crtc(m, dev, intel_crtc);
3083 drm_modeset_unlock_all(dev);
3085 if (!active_crtc_cnt)
3086 seq_puts(m, "No active crtc found\n");
3091 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3093 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3094 struct drm_device *dev = &dev_priv->drm;
3095 struct intel_encoder *intel_encoder;
3096 struct intel_digital_port *intel_dig_port;
3097 struct drm_connector *connector;
3098 struct drm_connector_list_iter conn_iter;
3100 drm_connector_list_iter_begin(dev, &conn_iter);
3101 drm_for_each_connector_iter(connector, &conn_iter) {
3102 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3105 intel_encoder = intel_attached_encoder(connector);
3106 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3109 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3110 if (!intel_dig_port->dp.can_mst)
3113 seq_printf(m, "MST Source Port %c\n",
3114 port_name(intel_dig_port->base.port));
3115 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3117 drm_connector_list_iter_end(&conn_iter);
3122 static ssize_t i915_displayport_test_active_write(struct file *file,
3123 const char __user *ubuf,
3124 size_t len, loff_t *offp)
3128 struct drm_device *dev;
3129 struct drm_connector *connector;
3130 struct drm_connector_list_iter conn_iter;
3131 struct intel_dp *intel_dp;
3134 dev = ((struct seq_file *)file->private_data)->private;
3139 input_buffer = memdup_user_nul(ubuf, len);
3140 if (IS_ERR(input_buffer))
3141 return PTR_ERR(input_buffer);
3143 DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
3145 drm_connector_list_iter_begin(dev, &conn_iter);
3146 drm_for_each_connector_iter(connector, &conn_iter) {
3147 struct intel_encoder *encoder;
3149 if (connector->connector_type !=
3150 DRM_MODE_CONNECTOR_DisplayPort)
3153 encoder = to_intel_encoder(connector->encoder);
3154 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3157 if (encoder && connector->status == connector_status_connected) {
3158 intel_dp = enc_to_intel_dp(&encoder->base);
3159 status = kstrtoint(input_buffer, 10, &val);
3162 DRM_DEBUG_DRIVER("Got %d for test active\n", val);
3163 /* To prevent erroneous activation of the compliance
3164 * testing code, only accept an actual value of 1 here
3167 intel_dp->compliance.test_active = 1;
3169 intel_dp->compliance.test_active = 0;
3172 drm_connector_list_iter_end(&conn_iter);
3173 kfree(input_buffer);
3181 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3183 struct drm_i915_private *dev_priv = m->private;
3184 struct drm_device *dev = &dev_priv->drm;
3185 struct drm_connector *connector;
3186 struct drm_connector_list_iter conn_iter;
3187 struct intel_dp *intel_dp;
3189 drm_connector_list_iter_begin(dev, &conn_iter);
3190 drm_for_each_connector_iter(connector, &conn_iter) {
3191 struct intel_encoder *encoder;
3193 if (connector->connector_type !=
3194 DRM_MODE_CONNECTOR_DisplayPort)
3197 encoder = to_intel_encoder(connector->encoder);
3198 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3201 if (encoder && connector->status == connector_status_connected) {
3202 intel_dp = enc_to_intel_dp(&encoder->base);
3203 if (intel_dp->compliance.test_active)
3210 drm_connector_list_iter_end(&conn_iter);
3215 static int i915_displayport_test_active_open(struct inode *inode,
3218 return single_open(file, i915_displayport_test_active_show,
3222 static const struct file_operations i915_displayport_test_active_fops = {
3223 .owner = THIS_MODULE,
3224 .open = i915_displayport_test_active_open,
3226 .llseek = seq_lseek,
3227 .release = single_release,
3228 .write = i915_displayport_test_active_write
3231 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3233 struct drm_i915_private *dev_priv = m->private;
3234 struct drm_device *dev = &dev_priv->drm;
3235 struct drm_connector *connector;
3236 struct drm_connector_list_iter conn_iter;
3237 struct intel_dp *intel_dp;
3239 drm_connector_list_iter_begin(dev, &conn_iter);
3240 drm_for_each_connector_iter(connector, &conn_iter) {
3241 struct intel_encoder *encoder;
3243 if (connector->connector_type !=
3244 DRM_MODE_CONNECTOR_DisplayPort)
3247 encoder = to_intel_encoder(connector->encoder);
3248 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3251 if (encoder && connector->status == connector_status_connected) {
3252 intel_dp = enc_to_intel_dp(&encoder->base);
3253 if (intel_dp->compliance.test_type ==
3254 DP_TEST_LINK_EDID_READ)
3255 seq_printf(m, "%lx",
3256 intel_dp->compliance.test_data.edid);
3257 else if (intel_dp->compliance.test_type ==
3258 DP_TEST_LINK_VIDEO_PATTERN) {
3259 seq_printf(m, "hdisplay: %d\n",
3260 intel_dp->compliance.test_data.hdisplay);
3261 seq_printf(m, "vdisplay: %d\n",
3262 intel_dp->compliance.test_data.vdisplay);
3263 seq_printf(m, "bpc: %u\n",
3264 intel_dp->compliance.test_data.bpc);
3269 drm_connector_list_iter_end(&conn_iter);
3273 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3275 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3277 struct drm_i915_private *dev_priv = m->private;
3278 struct drm_device *dev = &dev_priv->drm;
3279 struct drm_connector *connector;
3280 struct drm_connector_list_iter conn_iter;
3281 struct intel_dp *intel_dp;
3283 drm_connector_list_iter_begin(dev, &conn_iter);
3284 drm_for_each_connector_iter(connector, &conn_iter) {
3285 struct intel_encoder *encoder;
3287 if (connector->connector_type !=
3288 DRM_MODE_CONNECTOR_DisplayPort)
3291 encoder = to_intel_encoder(connector->encoder);
3292 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3295 if (encoder && connector->status == connector_status_connected) {
3296 intel_dp = enc_to_intel_dp(&encoder->base);
3297 seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3301 drm_connector_list_iter_end(&conn_iter);
3305 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3307 static void wm_latency_show(struct seq_file *m, const u16 wm[8])
3309 struct drm_i915_private *dev_priv = m->private;
3310 struct drm_device *dev = &dev_priv->drm;
3314 if (IS_CHERRYVIEW(dev_priv))
3316 else if (IS_VALLEYVIEW(dev_priv))
3318 else if (IS_G4X(dev_priv))
3321 num_levels = ilk_wm_max_level(dev_priv) + 1;
3323 drm_modeset_lock_all(dev);
3325 for (level = 0; level < num_levels; level++) {
3326 unsigned int latency = wm[level];
3329 * - WM1+ latency values in 0.5us units
3330 * - latencies are in us on gen9/vlv/chv
3332 if (INTEL_GEN(dev_priv) >= 9 ||
3333 IS_VALLEYVIEW(dev_priv) ||
3334 IS_CHERRYVIEW(dev_priv) ||
3340 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3341 level, wm[level], latency / 10, latency % 10);
3344 drm_modeset_unlock_all(dev);
3347 static int pri_wm_latency_show(struct seq_file *m, void *data)
3349 struct drm_i915_private *dev_priv = m->private;
3350 const u16 *latencies;
3352 if (INTEL_GEN(dev_priv) >= 9)
3353 latencies = dev_priv->wm.skl_latency;
3355 latencies = dev_priv->wm.pri_latency;
3357 wm_latency_show(m, latencies);
3362 static int spr_wm_latency_show(struct seq_file *m, void *data)
3364 struct drm_i915_private *dev_priv = m->private;
3365 const u16 *latencies;
3367 if (INTEL_GEN(dev_priv) >= 9)
3368 latencies = dev_priv->wm.skl_latency;
3370 latencies = dev_priv->wm.spr_latency;
3372 wm_latency_show(m, latencies);
3377 static int cur_wm_latency_show(struct seq_file *m, void *data)
3379 struct drm_i915_private *dev_priv = m->private;
3380 const u16 *latencies;
3382 if (INTEL_GEN(dev_priv) >= 9)
3383 latencies = dev_priv->wm.skl_latency;
3385 latencies = dev_priv->wm.cur_latency;
3387 wm_latency_show(m, latencies);
3392 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3394 struct drm_i915_private *dev_priv = inode->i_private;
3396 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3399 return single_open(file, pri_wm_latency_show, dev_priv);
3402 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3404 struct drm_i915_private *dev_priv = inode->i_private;
3406 if (HAS_GMCH(dev_priv))
3409 return single_open(file, spr_wm_latency_show, dev_priv);
3412 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3414 struct drm_i915_private *dev_priv = inode->i_private;
3416 if (HAS_GMCH(dev_priv))
3419 return single_open(file, cur_wm_latency_show, dev_priv);
3422 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3423 size_t len, loff_t *offp, u16 wm[8])
3425 struct seq_file *m = file->private_data;
3426 struct drm_i915_private *dev_priv = m->private;
3427 struct drm_device *dev = &dev_priv->drm;
3434 if (IS_CHERRYVIEW(dev_priv))
3436 else if (IS_VALLEYVIEW(dev_priv))
3438 else if (IS_G4X(dev_priv))
3441 num_levels = ilk_wm_max_level(dev_priv) + 1;
3443 if (len >= sizeof(tmp))
3446 if (copy_from_user(tmp, ubuf, len))
3451 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3452 &new[0], &new[1], &new[2], &new[3],
3453 &new[4], &new[5], &new[6], &new[7]);
3454 if (ret != num_levels)
3457 drm_modeset_lock_all(dev);
3459 for (level = 0; level < num_levels; level++)
3460 wm[level] = new[level];
3462 drm_modeset_unlock_all(dev);
3468 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3469 size_t len, loff_t *offp)
3471 struct seq_file *m = file->private_data;
3472 struct drm_i915_private *dev_priv = m->private;
3475 if (INTEL_GEN(dev_priv) >= 9)
3476 latencies = dev_priv->wm.skl_latency;
3478 latencies = dev_priv->wm.pri_latency;
3480 return wm_latency_write(file, ubuf, len, offp, latencies);
3483 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3484 size_t len, loff_t *offp)
3486 struct seq_file *m = file->private_data;
3487 struct drm_i915_private *dev_priv = m->private;
3490 if (INTEL_GEN(dev_priv) >= 9)
3491 latencies = dev_priv->wm.skl_latency;
3493 latencies = dev_priv->wm.spr_latency;
3495 return wm_latency_write(file, ubuf, len, offp, latencies);
3498 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3499 size_t len, loff_t *offp)
3501 struct seq_file *m = file->private_data;
3502 struct drm_i915_private *dev_priv = m->private;
3505 if (INTEL_GEN(dev_priv) >= 9)
3506 latencies = dev_priv->wm.skl_latency;
3508 latencies = dev_priv->wm.cur_latency;
3510 return wm_latency_write(file, ubuf, len, offp, latencies);
3513 static const struct file_operations i915_pri_wm_latency_fops = {
3514 .owner = THIS_MODULE,
3515 .open = pri_wm_latency_open,
3517 .llseek = seq_lseek,
3518 .release = single_release,
3519 .write = pri_wm_latency_write
3522 static const struct file_operations i915_spr_wm_latency_fops = {
3523 .owner = THIS_MODULE,
3524 .open = spr_wm_latency_open,
3526 .llseek = seq_lseek,
3527 .release = single_release,
3528 .write = spr_wm_latency_write
3531 static const struct file_operations i915_cur_wm_latency_fops = {
3532 .owner = THIS_MODULE,
3533 .open = cur_wm_latency_open,
3535 .llseek = seq_lseek,
3536 .release = single_release,
3537 .write = cur_wm_latency_write
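/*
 * A write to any of the wm latency files above must supply exactly one
 * decimal value per watermark level on the platform (see the num_levels
 * selection in wm_latency_write()); any other count is rejected. Usage
 * sketch, with both the values and the path given only as an example:
 *
 *   echo "2 4 8 16 32 64 64 64" > /sys/kernel/debug/dri/0/i915_pri_wm_latency
 */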
3541 i915_wedged_get(void *data, u64 *val)
3543 struct drm_i915_private *i915 = data;
3544 int ret = intel_gt_terminally_wedged(&i915->gt);
3559 i915_wedged_set(void *data, u64 val)
3561 struct drm_i915_private *i915 = data;
3563 /* Flush any previous reset before applying for a new one */
3564 wait_event(i915->gt.reset.queue,
3565 !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
3567 intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
3568 "Manually set wedged engine mask = %llx", val);
3572 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
3573 i915_wedged_get, i915_wedged_set,
3576 #define DROP_UNBOUND BIT(0)
3577 #define DROP_BOUND BIT(1)
3578 #define DROP_RETIRE BIT(2)
3579 #define DROP_ACTIVE BIT(3)
3580 #define DROP_FREED BIT(4)
3581 #define DROP_SHRINK_ALL BIT(5)
3582 #define DROP_IDLE BIT(6)
3583 #define DROP_RESET_ACTIVE BIT(7)
3584 #define DROP_RESET_SEQNO BIT(8)
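/*
 * Userspace requests cache dropping by writing a mask built from the
 * DROP_* bits above to the "i915_gem_drop_caches" debugfs file. Usage
 * sketch (path assumed, mask chosen only as an example):
 *
 *   echo 0x7 > /sys/kernel/debug/dri/0/i915_gem_drop_caches
 */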
3585 #define DROP_ALL (DROP_UNBOUND | \
3592 DROP_RESET_ACTIVE | \
3595 i915_drop_caches_get(void *data, u64 *val)
3603 i915_drop_caches_set(void *data, u64 val)
3605 struct drm_i915_private *i915 = data;
3607 DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
3608 val, val & DROP_ALL);
3610 if (val & DROP_RESET_ACTIVE &&
3611 wait_for(intel_engines_are_idle(&i915->gt),
3612 I915_IDLE_ENGINES_TIMEOUT))
3613 intel_gt_set_wedged(&i915->gt);
3615 /* No need to check and wait for gpu resets; libdrm only auto-restarts
3616 * ioctls on -EAGAIN. */
3617 if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) {
3620 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
3625 * To finish the flush of the idle_worker, we must complete
3626 * the switch-to-kernel-context, which requires a double
3627 * pass through wait_for_idle: first queues the switch,
3628 * second waits for the switch.
3630 if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE))
3631 ret = i915_gem_wait_for_idle(i915,
3632 I915_WAIT_INTERRUPTIBLE |
3634 MAX_SCHEDULE_TIMEOUT);
3636 if (ret == 0 && val & DROP_IDLE)
3637 ret = i915_gem_wait_for_idle(i915,
3638 I915_WAIT_INTERRUPTIBLE |
3640 MAX_SCHEDULE_TIMEOUT);
3642 if (val & DROP_RETIRE)
3643 i915_retire_requests(i915);
3645 mutex_unlock(&i915->drm.struct_mutex);
3647 if (ret == 0 && val & DROP_IDLE)
3648 ret = intel_gt_pm_wait_for_idle(&i915->gt);
3651 if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
3652 intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL);
3654 fs_reclaim_acquire(GFP_KERNEL);
3655 if (val & DROP_BOUND)
3656 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
3658 if (val & DROP_UNBOUND)
3659 i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);
3661 if (val & DROP_SHRINK_ALL)
3662 i915_gem_shrink_all(i915);
3663 fs_reclaim_release(GFP_KERNEL);
3665 if (val & DROP_IDLE) {
3666 flush_delayed_work(&i915->gem.retire_work);
3667 flush_work(&i915->gem.idle_work);
3670 if (val & DROP_FREED)
3671 i915_gem_drain_freed_objects(i915);
3676 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
3677 i915_drop_caches_get, i915_drop_caches_set,
3681 i915_cache_sharing_get(void *data, u64 *val)
3683 struct drm_i915_private *dev_priv = data;
3684 intel_wakeref_t wakeref;
3687 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3690 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
3691 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3693 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
3699 i915_cache_sharing_set(void *data, u64 val)
3701 struct drm_i915_private *dev_priv = data;
3702 intel_wakeref_t wakeref;
3704 if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
3710 DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
3711 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3714 /* Update the cache sharing policy here as well */
3715 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3716 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3717 snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
3718 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3724 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
3725 i915_cache_sharing_get, i915_cache_sharing_set,
3728 static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
3729 struct sseu_dev_info *sseu)
3732 const int ss_max = SS_MAX;
3733 u32 sig1[SS_MAX], sig2[SS_MAX];
3736 sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
3737 sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
3738 sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
3739 sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);
3741 for (ss = 0; ss < ss_max; ss++) {
3742 unsigned int eu_cnt;
3744 if (sig1[ss] & CHV_SS_PG_ENABLE)
3745 /* skip disabled subslice */
3748 sseu->slice_mask = BIT(0);
3749 sseu->subslice_mask[0] |= BIT(ss);
3750 eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
3751 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
3752 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
3753 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
3754 sseu->eu_total += eu_cnt;
3755 sseu->eu_per_subslice = max_t(unsigned int,
3756 sseu->eu_per_subslice, eu_cnt);
3761 static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
3762 struct sseu_dev_info *sseu)
3765 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3766 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3769 for (s = 0; s < info->sseu.max_slices; s++) {
3771 * FIXME: The valid SS mask respects the spec and reads only the
3772 * valid bits of those registers, excluding reserved bits, although
3773 * this seems wrong because it would leave many subslices without
3774 * an ACK.
3776 s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
3777 GEN10_PGCTL_VALID_SS_MASK(s);
3778 eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
3779 eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
3782 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3783 GEN9_PGCTL_SSA_EU19_ACK |
3784 GEN9_PGCTL_SSA_EU210_ACK |
3785 GEN9_PGCTL_SSA_EU311_ACK;
3786 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3787 GEN9_PGCTL_SSB_EU19_ACK |
3788 GEN9_PGCTL_SSB_EU210_ACK |
3789 GEN9_PGCTL_SSB_EU311_ACK;
3791 for (s = 0; s < info->sseu.max_slices; s++) {
3792 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3793 /* skip disabled slice */
3796 sseu->slice_mask |= BIT(s);
3797 sseu->subslice_mask[s] = info->sseu.subslice_mask[s];
3799 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3800 unsigned int eu_cnt;
3802 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3803 /* skip disabled subslice */
3806 eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
3808 sseu->eu_total += eu_cnt;
3809 sseu->eu_per_subslice = max_t(unsigned int,
3810 sseu->eu_per_subslice,
3817 static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
3818 struct sseu_dev_info *sseu)
3821 const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
3822 u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
3825 for (s = 0; s < info->sseu.max_slices; s++) {
3826 s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
3827 eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
3828 eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
3831 eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
3832 GEN9_PGCTL_SSA_EU19_ACK |
3833 GEN9_PGCTL_SSA_EU210_ACK |
3834 GEN9_PGCTL_SSA_EU311_ACK;
3835 eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
3836 GEN9_PGCTL_SSB_EU19_ACK |
3837 GEN9_PGCTL_SSB_EU210_ACK |
3838 GEN9_PGCTL_SSB_EU311_ACK;
3840 for (s = 0; s < info->sseu.max_slices; s++) {
3841 if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
3842 /* skip disabled slice */
3845 sseu->slice_mask |= BIT(s);
3847 if (IS_GEN9_BC(dev_priv))
3848 sseu->subslice_mask[s] =
3849 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3851 for (ss = 0; ss < info->sseu.max_subslices; ss++) {
3852 unsigned int eu_cnt;
3854 if (IS_GEN9_LP(dev_priv)) {
3855 if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
3856 /* skip disabled subslice */
3859 sseu->subslice_mask[s] |= BIT(ss);
3862 eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
3864 sseu->eu_total += eu_cnt;
3865 sseu->eu_per_subslice = max_t(unsigned int,
3866 sseu->eu_per_subslice,
3873 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
3874 struct sseu_dev_info *sseu)
3876 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
3879 sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
3881 if (sseu->slice_mask) {
3882 sseu->eu_per_subslice =
3883 RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
3884 for (s = 0; s < fls(sseu->slice_mask); s++) {
3885 sseu->subslice_mask[s] =
3886 RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
3888 sseu->eu_total = sseu->eu_per_subslice *
3889 intel_sseu_subslice_total(sseu);
3891 /* subtract fused off EU(s) from enabled slice(s) */
3892 for (s = 0; s < fls(sseu->slice_mask); s++) {
3894 RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
3896 sseu->eu_total -= hweight8(subslice_7eu);
3901 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3902 const struct sseu_dev_info *sseu)
3904 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3905 const char *type = is_available_info ? "Available" : "Enabled";
3908 seq_printf(m, " %s Slice Mask: %04x\n", type,
3910 seq_printf(m, " %s Slice Total: %u\n", type,
3911 hweight8(sseu->slice_mask));
3912 seq_printf(m, " %s Subslice Total: %u\n", type,
3913 intel_sseu_subslice_total(sseu));
3914 for (s = 0; s < fls(sseu->slice_mask); s++) {
3915 seq_printf(m, " %s Slice%i subslices: %u\n", type,
3916 s, intel_sseu_subslices_per_slice(sseu, s));
3918 seq_printf(m, " %s EU Total: %u\n", type,
3920 seq_printf(m, " %s EU Per Subslice: %u\n", type,
3921 sseu->eu_per_subslice);
3923 if (!is_available_info)
3926 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3927 if (HAS_POOLED_EU(dev_priv))
3928 seq_printf(m, " Min EU in pool: %u\n", sseu->min_eu_in_pool);
3930 seq_printf(m, " Has Slice Power Gating: %s\n",
3931 yesno(sseu->has_slice_pg));
3932 seq_printf(m, " Has Subslice Power Gating: %s\n",
3933 yesno(sseu->has_subslice_pg));
3934 seq_printf(m, " Has EU Power Gating: %s\n",
3935 yesno(sseu->has_eu_pg));
3938 static int i915_sseu_status(struct seq_file *m, void *unused)
3940 struct drm_i915_private *dev_priv = node_to_i915(m->private);
3941 struct sseu_dev_info sseu;
3942 intel_wakeref_t wakeref;
3944 if (INTEL_GEN(dev_priv) < 8)
3947 seq_puts(m, "SSEU Device Info\n");
3948 i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
3950 seq_puts(m, "SSEU Device Status\n");
3951 memset(&sseu, 0, sizeof(sseu));
3952 sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
3953 sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
3954 sseu.max_eus_per_subslice =
3955 RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
3957 with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
3958 if (IS_CHERRYVIEW(dev_priv))
3959 cherryview_sseu_device_status(dev_priv, &sseu);
3960 else if (IS_BROADWELL(dev_priv))
3961 broadwell_sseu_device_status(dev_priv, &sseu);
3962 else if (IS_GEN(dev_priv, 9))
3963 gen9_sseu_device_status(dev_priv, &sseu);
3964 else if (INTEL_GEN(dev_priv) >= 10)
3965 gen10_sseu_device_status(dev_priv, &sseu);
3968 i915_print_sseu_info(m, false, &sseu);
3973 static int i915_forcewake_open(struct inode *inode, struct file *file)
3975 struct drm_i915_private *i915 = inode->i_private;
3977 if (INTEL_GEN(i915) < 6)
3980 file->private_data =
3981 (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm);
3982 intel_uncore_forcewake_user_get(&i915->uncore);
3987 static int i915_forcewake_release(struct inode *inode, struct file *file)
3989 struct drm_i915_private *i915 = inode->i_private;
3991 if (INTEL_GEN(i915) < 6)
3994 intel_uncore_forcewake_user_put(&i915->uncore);
3995 intel_runtime_pm_put(&i915->runtime_pm,
3996 (intel_wakeref_t)(uintptr_t)file->private_data);
4001 static const struct file_operations i915_forcewake_fops = {
4002 .owner = THIS_MODULE,
4003 .open = i915_forcewake_open,
4004 .release = i915_forcewake_release,
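/*
 * Opening "i915_forcewake_user" takes a runtime PM wakeref and a user
 * forcewake reference that are held until the file is closed, keeping
 * the hardware awake, e.g. for manual register inspection.
 */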
4007 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4009 struct drm_i915_private *dev_priv = m->private;
4010 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4012 /* Synchronize with everything first in case there's been an HPD
4013 * storm, but we haven't finished handling it in the kernel yet
4015 intel_synchronize_irq(dev_priv);
4016 flush_work(&dev_priv->hotplug.dig_port_work);
4017 flush_delayed_work(&dev_priv->hotplug.hotplug_work);
4019 seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4020 seq_printf(m, "Detected: %s\n",
4021 yesno(delayed_work_pending(&hotplug->reenable_work)));
4026 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4027 const char __user *ubuf, size_t len,
4030 struct seq_file *m = file->private_data;
4031 struct drm_i915_private *dev_priv = m->private;
4032 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4033 unsigned int new_threshold;
4038 if (len >= sizeof(tmp))
4041 if (copy_from_user(tmp, ubuf, len))
4046 /* Strip newline, if any */
4047 newline = strchr(tmp, '\n');
4051 if (strcmp(tmp, "reset") == 0)
4052 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4053 else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4056 if (new_threshold > 0)
4057 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4060 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4062 spin_lock_irq(&dev_priv->irq_lock);
4063 hotplug->hpd_storm_threshold = new_threshold;
4064 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4066 hotplug->stats[i].count = 0;
4067 spin_unlock_irq(&dev_priv->irq_lock);
4069 /* Re-enable hpd immediately if we were in an irq storm */
4070 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4075 static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
4077 return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
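/*
 * i915_hpd_storm_ctl accepts a decimal threshold (0 disables storm
 * detection) or the literal string "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD, as parsed in i915_hpd_storm_ctl_write()
 * above. Usage sketch (debugfs path assumed):
 *
 *   echo reset > /sys/kernel/debug/dri/0/i915_hpd_storm_ctl
 */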
4080 static const struct file_operations i915_hpd_storm_ctl_fops = {
4081 .owner = THIS_MODULE,
4082 .open = i915_hpd_storm_ctl_open,
4084 .llseek = seq_lseek,
4085 .release = single_release,
4086 .write = i915_hpd_storm_ctl_write
4089 static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
4091 struct drm_i915_private *dev_priv = m->private;
4093 seq_printf(m, "Enabled: %s\n",
4094 yesno(dev_priv->hotplug.hpd_short_storm_enabled));
4100 i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
4102 return single_open(file, i915_hpd_short_storm_ctl_show,
4106 static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
4107 const char __user *ubuf,
4108 size_t len, loff_t *offp)
4110 struct seq_file *m = file->private_data;
4111 struct drm_i915_private *dev_priv = m->private;
4112 struct i915_hotplug *hotplug = &dev_priv->hotplug;
4118 if (len >= sizeof(tmp))
4121 if (copy_from_user(tmp, ubuf, len))
4126 /* Strip newline, if any */
4127 newline = strchr(tmp, '\n');
4131 /* Reset to the "default" state for this system */
4132 if (strcmp(tmp, "reset") == 0)
4133 new_state = !HAS_DP_MST(dev_priv);
4134 else if (kstrtobool(tmp, &new_state) != 0)
4137 DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
4138 new_state ? "En" : "Dis");
4140 spin_lock_irq(&dev_priv->irq_lock);
4141 hotplug->hpd_short_storm_enabled = new_state;
4142 /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4144 hotplug->stats[i].count = 0;
4145 spin_unlock_irq(&dev_priv->irq_lock);
4147 /* Re-enable hpd immediately if we were in an irq storm */
4148 flush_delayed_work(&dev_priv->hotplug.reenable_work);
4153 static const struct file_operations i915_hpd_short_storm_ctl_fops = {
4154 .owner = THIS_MODULE,
4155 .open = i915_hpd_short_storm_ctl_open,
4157 .llseek = seq_lseek,
4158 .release = single_release,
4159 .write = i915_hpd_short_storm_ctl_write,
4162 static int i915_drrs_ctl_set(void *data, u64 val)
4164 struct drm_i915_private *dev_priv = data;
4165 struct drm_device *dev = &dev_priv->drm;
4166 struct intel_crtc *crtc;
4168 if (INTEL_GEN(dev_priv) < 7)
4171 for_each_intel_crtc(dev, crtc) {
4172 struct drm_connector_list_iter conn_iter;
4173 struct intel_crtc_state *crtc_state;
4174 struct drm_connector *connector;
4175 struct drm_crtc_commit *commit;
4178 ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
4182 crtc_state = to_intel_crtc_state(crtc->base.state);
4184 if (!crtc_state->base.active ||
4185 !crtc_state->has_drrs)
4188 commit = crtc_state->base.commit;
4190 ret = wait_for_completion_interruptible(&commit->hw_done);
4195 drm_connector_list_iter_begin(dev, &conn_iter);
4196 drm_for_each_connector_iter(connector, &conn_iter) {
4197 struct intel_encoder *encoder;
4198 struct intel_dp *intel_dp;
4200 if (!(crtc_state->base.connector_mask &
4201 drm_connector_mask(connector)))
4204 encoder = intel_attached_encoder(connector);
4205 if (encoder->type != INTEL_OUTPUT_EDP)
4208 DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
4209 val ? "en" : "dis", val);
4211 intel_dp = enc_to_intel_dp(&encoder->base);
4213 intel_edp_drrs_enable(intel_dp,
4216 intel_edp_drrs_disable(intel_dp,
4219 drm_connector_list_iter_end(&conn_iter);
4222 drm_modeset_unlock(&crtc->base.mutex);
4230 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
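/*
 * "i915_drrs_ctl" is write-only (the get method is NULL); a non-zero
 * value manually enables eDP DRRS on every active, DRRS-capable pipe
 * and zero disables it again, per i915_drrs_ctl_set() above.
 */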
4233 i915_fifo_underrun_reset_write(struct file *filp,
4234 const char __user *ubuf,
4235 size_t cnt, loff_t *ppos)
4237 struct drm_i915_private *dev_priv = filp->private_data;
4238 struct intel_crtc *intel_crtc;
4239 struct drm_device *dev = &dev_priv->drm;
4243 ret = kstrtobool_from_user(ubuf, cnt, &reset);
4250 for_each_intel_crtc(dev, intel_crtc) {
4251 struct drm_crtc_commit *commit;
4252 struct intel_crtc_state *crtc_state;
4254 ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
4258 crtc_state = to_intel_crtc_state(intel_crtc->base.state);
4259 commit = crtc_state->base.commit;
4261 ret = wait_for_completion_interruptible(&commit->hw_done);
4263 ret = wait_for_completion_interruptible(&commit->flip_done);
4266 if (!ret && crtc_state->base.active) {
4267 DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
4268 pipe_name(intel_crtc->pipe));
4270 intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
4273 drm_modeset_unlock(&intel_crtc->base.mutex);
4279 ret = intel_fbc_reset_underrun(dev_priv);
4286 static const struct file_operations i915_fifo_underrun_reset_ops = {
4287 .owner = THIS_MODULE,
4288 .open = simple_open,
4289 .write = i915_fifo_underrun_reset_write,
4290 .llseek = default_llseek,

static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
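
/*
 * Every entry above is a read-only informational node; the third field
 * (driver_features) is 0, so the files are unconditionally created by
 * drm_debugfs_create_files() in i915_debugfs_register() below. The optional
 * fourth field is handed back to the show callback through its
 * drm_info_node, which is how i915_guc_load_err_log_dump reuses
 * i915_guc_log_dump with (void *)1 to select the load-error log buffer.
 */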

static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};

int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
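
/*
 * minor->debugfs_root is the per-minor DRM debugfs directory, so with
 * debugfs mounted at its conventional location the nodes registered above
 * typically appear under /sys/kernel/debug/dri/<minor>/. Reading one of the
 * informational files is an ordinary file read, e.g.:
 *
 *	cat /sys/kernel/debug/dri/0/i915_capabilities
 */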

struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};

static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
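
/*
 * Worked example of the length rule used by i915_dpcd_show() below
 * (end ? end - offset + 1 : (size ?: 1)):
 *
 *	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }	-> 2 bytes
 *	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }		-> 16 bytes
 *	{ .offset = DP_SET_POWER }				-> 1 byte
 */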

static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;
		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;
		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);

static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);

static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported only if the connector provides an hdcp shim */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);

static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);
	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}

static int i915_dsc_fec_support_open(struct inode *inode,
				     struct file *file)
{
	return single_open(file, i915_dsc_fec_support_show,
			   inode->i_private);
}

static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
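
/*
 * The per-connector i915_dsc_fec_support node created below is read/write:
 * reads print the DSC_Enabled / DSC_Sink_Support / Force_DSC_Enable (and,
 * for non-eDP sinks, FEC_Sink_Support) lines from
 * i915_dsc_fec_support_show(), while writing a boolean sets
 * intel_dp->force_dsc_en so that DSC is forced on the connector's next
 * modeset. Assuming a DP connector registered as "DP-1" on minor 0:
 *
 *	echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 *	cat /sys/kernel/debug/dri/0/DP-1/i915_dsc_fec_support
 */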

/**
 * i915_debugfs_connector_add - add i915 specific connector debugfs files
 * @connector: pointer to a registered drm_connector
 *
 * Cleanup will be done by drm_connector_unregister() through a call to
 * drm_debugfs_connector_remove().
 *
 * Returns 0 on success, negative error codes on error.
 */
int i915_debugfs_connector_add(struct drm_connector *connector)
{
	struct dentry *root = connector->debugfs_entry;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);

	/* The connector must have been registered beforehand. */
	if (!root)
		return -ENODEV;

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
		debugfs_create_file("i915_dpcd", S_IRUGO, root,
				    connector, &i915_dpcd_fops);

	if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		debugfs_create_file("i915_panel_timings", S_IRUGO, root,
				    connector, &i915_panel_fops);
		debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
				    connector, &i915_psr_sink_status_fops);
	}

	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
		debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
				    connector, &i915_hdcp_sink_capability_fops);
	}

	if (INTEL_GEN(dev_priv) >= 10 &&
	    (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
	     connector->connector_type == DRM_MODE_CONNECTOR_eDP))
		debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
				    connector, &i915_dsc_fec_support_fops);

	return 0;
}
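
/*
 * The resulting per-connector layout, assuming an eDP panel registered as
 * "eDP-1" on minor 0 of a gen >= 10 platform, would look roughly like:
 *
 *	/sys/kernel/debug/dri/0/eDP-1/i915_dpcd
 *	/sys/kernel/debug/dri/0/eDP-1/i915_panel_timings
 *	/sys/kernel/debug/dri/0/eDP-1/i915_psr_sink_status
 *	/sys/kernel/debug/dri/0/eDP-1/i915_dsc_fec_support
 *
 * DP and HDMI connectors additionally get i915_hdcp_sink_capability, per the
 * connector_type checks above.
 */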