asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: Use drm_rect to simplify plane {crtc,src}_{x,y,w,h} printing
[linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34
35 #include "display/intel_display_types.h"
36 #include "display/intel_dp.h"
37 #include "display/intel_fbc.h"
38 #include "display/intel_hdcp.h"
39 #include "display/intel_hdmi.h"
40 #include "display/intel_psr.h"
41
42 #include "gem/i915_gem_context.h"
43 #include "gt/intel_gt_pm.h"
44 #include "gt/intel_gt_requests.h"
45 #include "gt/intel_reset.h"
46 #include "gt/intel_rc6.h"
47 #include "gt/intel_rps.h"
48 #include "gt/uc/intel_guc_submission.h"
49
50 #include "i915_debugfs.h"
51 #include "i915_irq.h"
52 #include "i915_trace.h"
53 #include "intel_csr.h"
54 #include "intel_pm.h"
55 #include "intel_sideband.h"
56
57 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
58 {
59         return to_i915(node->minor->dev);
60 }
61
62 static int i915_capabilities(struct seq_file *m, void *data)
63 {
64         struct drm_i915_private *dev_priv = node_to_i915(m->private);
65         const struct intel_device_info *info = INTEL_INFO(dev_priv);
66         struct drm_printer p = drm_seq_file_printer(m);
67         const char *msg;
68
69         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
70         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
71         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
72
73         msg = "n/a";
74 #ifdef CONFIG_INTEL_IOMMU
75         msg = enableddisabled(intel_iommu_gfx_mapped);
76 #endif
77         seq_printf(m, "iommu: %s\n", msg);
78
79         intel_device_info_dump_flags(info, &p);
80         intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
81         intel_driver_caps_print(&dev_priv->caps, &p);
82
83         kernel_param_lock(THIS_MODULE);
84         i915_params_dump(&i915_modparams, &p);
85         kernel_param_unlock(THIS_MODULE);
86
87         return 0;
88 }
89
90 static char get_tiling_flag(struct drm_i915_gem_object *obj)
91 {
92         switch (i915_gem_object_get_tiling(obj)) {
93         default:
94         case I915_TILING_NONE: return ' ';
95         case I915_TILING_X: return 'X';
96         case I915_TILING_Y: return 'Y';
97         }
98 }
99
100 static char get_global_flag(struct drm_i915_gem_object *obj)
101 {
102         return READ_ONCE(obj->userfault_count) ? 'g' : ' ';
103 }
104
105 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
106 {
107         return obj->mm.mapping ? 'M' : ' ';
108 }
109
110 static const char *
111 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
112 {
113         size_t x = 0;
114
115         switch (page_sizes) {
116         case 0:
117                 return "";
118         case I915_GTT_PAGE_SIZE_4K:
119                 return "4K";
120         case I915_GTT_PAGE_SIZE_64K:
121                 return "64K";
122         case I915_GTT_PAGE_SIZE_2M:
123                 return "2M";
124         default:
125                 if (!buf)
126                         return "M";
127
128                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
129                         x += snprintf(buf + x, len - x, "2M, ");
130                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
131                         x += snprintf(buf + x, len - x, "64K, ");
132                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
133                         x += snprintf(buf + x, len - x, "4K, ");
134                 buf[x-2] = '\0';
135
136                 return buf;
137         }
138 }
139
/*
 * Print a one-line description of a GEM object followed by one
 * parenthesized entry per allocated GTT binding: flags, size, cache
 * level, dirty/purgeable state, GGTT view layout and fence, stolen
 * offset, framebuffer marker and last write engine.
 *
 * obj->vma.lock is taken to walk the vma list but dropped around the
 * printing of each vma (NOTE(review): presumably because the seq output
 * path may sleep — confirm), so the list can change under us; that is
 * tolerable for debugfs output.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	int pin_count = 0;

	/* Flags: tiling (X/Y/space), 'g' userfault, 'M' kernel mapping. */
	seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);

	spin_lock(&obj->vma.lock);
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		/* Drop the lock while printing; reacquired below before
		 * the iterator advances to the next element. */
		spin_unlock(&obj->vma.lock);

		if (i915_vma_is_pinned(vma))
			pin_count++;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* Describe how this GGTT binding views the object. */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				/* Page offsets scaled to bytes for display. */
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			case I915_GGTT_VIEW_REMAPPED:
				seq_printf(m, ", remapped [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.remapped.plane[0].width,
					   vma->ggtt_view.remapped.plane[0].height,
					   vma->ggtt_view.remapped.plane[0].stride,
					   vma->ggtt_view.remapped.plane[0].offset,
					   vma->ggtt_view.remapped.plane[1].width,
					   vma->ggtt_view.remapped.plane[1].height,
					   vma->ggtt_view.remapped.plane[1].stride,
					   vma->ggtt_view.remapped.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d", vma->fence->id);
		seq_puts(m, ")");

		spin_lock(&obj->vma.lock);
	}
	spin_unlock(&obj->vma.lock);

	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
	if (i915_gem_object_is_framebuffer(obj))
		seq_printf(m, " (fb)");

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);
}
235
/*
 * Accumulator for per-client object statistics, filled by per_file_stats().
 * When @vm is set, only VMAs bound into that address space are counted;
 * when NULL, only GGTT bindings are counted.
 */
struct file_stats {
	struct i915_address_space *vm;	/* filter; NULL means GGTT-only */
	unsigned long count;		/* objects visited */
	u64 total, unbound;		/* total size / size with no binding */
	u64 active, inactive;		/* bound size split by GPU activity */
	u64 closed;			/* bound size on closed VMAs */
};
243
/*
 * idr_for_each() callback: accumulate one object's size statistics into
 * the file_stats pointed to by @data.  @ptr is the GEM object; @id is the
 * idr handle (unused).  Always returns 0 so the iteration continues.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	/* Skip objects that are already on their way to being freed. */
	if (!kref_get_unless_zero(&obj->base.refcount))
		return 0;

	stats->count++;
	stats->total += obj->base.size;
	if (!atomic_read(&obj->bind_count))
		stats->unbound += obj->base.size;

	spin_lock(&obj->vma.lock);
	if (!stats->vm) {
		/* No filter: account every allocated GGTT binding. */
		for_each_ggtt_vma(vma, obj) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (i915_vma_is_active(vma))
				stats->active += vma->node.size;
			else
				stats->inactive += vma->node.size;

			if (i915_vma_is_closed(vma))
				stats->closed += vma->node.size;
		}
	} else {
		/*
		 * Look up the single vma bound into stats->vm via the
		 * object's vma rbtree, keyed by i915_vma_compare().
		 */
		struct rb_node *p = obj->vma.tree.rb_node;

		while (p) {
			long cmp;

			vma = rb_entry(p, typeof(*vma), obj_node);
			cmp = i915_vma_compare(vma, stats->vm, NULL);
			if (cmp == 0) {
				if (drm_mm_node_allocated(&vma->node)) {
					if (i915_vma_is_active(vma))
						stats->active += vma->node.size;
					else
						stats->inactive += vma->node.size;

					if (i915_vma_is_closed(vma))
						stats->closed += vma->node.size;
				}
				break;
			}
			/* Descend per i915_vma_compare()'s tree ordering. */
			if (cmp < 0)
				p = p->rb_right;
			else
				p = p->rb_left;
		}
	}
	spin_unlock(&obj->vma.lock);

	/* Drop the reference taken above. */
	i915_gem_object_put(obj);
	return 0;
}
303
/*
 * Emit one "name: N objects, ... bytes (...)" summary line for a
 * file_stats accumulator; prints nothing if no objects were counted.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
315
/*
 * Walk every GEM context and print per-task object statistics, plus one
 * aggregate "[k]contexts" line for kernel-owned context state (ring and
 * context-image objects of pinned engine contexts).
 *
 * The contexts list is walked under gem.contexts.lock; for each context a
 * reference is taken and the lock dropped while the context is inspected,
 * then list_safe_reset_next() resynchronizes the iterator after relocking.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx, *cn;

	spin_lock(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		/* Skip contexts already being destroyed. */
		if (!kref_get_unless_zero(&ctx->ref))
			continue;

		spin_unlock(&i915->gem.contexts.lock);

		/* Account kernel-owned backing objects of pinned contexts. */
		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			intel_context_lock_pinned(ce);
			if (intel_context_is_pinned(ce)) {
				rcu_read_lock();
				if (ce->state)
					per_file_stats(0,
						       ce->state->obj, &kstats);
				per_file_stats(0, ce->ring->vma->obj, &kstats);
				rcu_read_unlock();
			}
			intel_context_unlock_pinned(ce);
		}
		i915_gem_context_unlock_engines(ctx);

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = {
				.vm = rcu_access_pointer(ctx->vm),
			};
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			/* Sum all objects in the client's handle table. */
			rcu_read_lock();
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			rcu_read_unlock();

			/* Label the line with the owning task's comm. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}

		spin_lock(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock(&i915->gem.contexts.lock);

	print_file_stats(m, "[k]contexts", kstats);
}
376
377 static int i915_gem_object_info(struct seq_file *m, void *data)
378 {
379         struct drm_i915_private *i915 = node_to_i915(m->private);
380
381         seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n",
382                    i915->mm.shrink_count,
383                    atomic_read(&i915->mm.free_count),
384                    i915->mm.shrink_memory);
385
386         seq_putc(m, '\n');
387
388         print_context_stats(m, i915);
389
390         return 0;
391 }
392
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power well is off), then the port, misc and PCU
 * interrupt registers.  Helper shared by the gen8 and gen11 branches of
 * i915_interrupt_info().
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/*
		 * Only read the pipe registers if the pipe's power well is
		 * up; reading powered-down registers is not safe.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
444
/*
 * debugfs entry: dump the interrupt registers relevant to this platform.
 * Branches per platform generation (CHV, gen11+, gen8+, VLV, pre-PCH-split
 * gen2-4, then Ironlake-style PCH split), then appends the per-engine
 * interrupt masks for gen6+.  Holds a runtime-pm wakeref for the whole
 * dump so the device is awake for the MMIO reads.
 */
static int i915_interrupt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	int i, pipe;

	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

	if (IS_CHERRYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip pipes whose power well is down. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));

			intel_display_power_put(dev_priv, power_domain, pref);
		}

		/* Hotplug/flip registers need the display powered up. */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		seq_printf(m, "PCU interrupt mask:\t%08x\n",
			   I915_READ(GEN8_PCU_IMR));
		seq_printf(m, "PCU interrupt identity:\t%08x\n",
			   I915_READ(GEN8_PCU_IIR));
		seq_printf(m, "PCU interrupt enable:\t%08x\n",
			   I915_READ(GEN8_PCU_IER));
	} else if (INTEL_GEN(dev_priv) >= 11) {
		seq_printf(m, "Master Interrupt Control:  %08x\n",
			   I915_READ(GEN11_GFX_MSTR_IRQ));

		seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
			   I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
		seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
			   I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
		seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_ENABLE));
		seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
		seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
		seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));

		seq_printf(m, "Display Interrupt Control:\t%08x\n",
			   I915_READ(GEN11_DISPLAY_INT_CTL));

		/* Display side is gen8-compatible on gen11. */
		gen8_display_interrupt_info(m);
	} else if (INTEL_GEN(dev_priv) >= 8) {
		seq_printf(m, "Master Interrupt Control:\t%08x\n",
			   I915_READ(GEN8_MASTER_IRQ));

		for (i = 0; i < 4; i++) {
			seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IMR(i)));
			seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IIR(i)));
			seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
				   i, I915_READ(GEN8_GT_IER(i)));
		}

		gen8_display_interrupt_info(m);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_wakeref_t pref;

		seq_printf(m, "Display IER:\t%08x\n",
			   I915_READ(VLV_IER));
		seq_printf(m, "Display IIR:\t%08x\n",
			   I915_READ(VLV_IIR));
		seq_printf(m, "Display IIR_RW:\t%08x\n",
			   I915_READ(VLV_IIR_RW));
		seq_printf(m, "Display IMR:\t%08x\n",
			   I915_READ(VLV_IMR));
		for_each_pipe(dev_priv, pipe) {
			enum intel_display_power_domain power_domain;

			/* Skip pipes whose power well is down. */
			power_domain = POWER_DOMAIN_PIPE(pipe);
			pref = intel_display_power_get_if_enabled(dev_priv,
								  power_domain);
			if (!pref) {
				seq_printf(m, "Pipe %c power disabled\n",
					   pipe_name(pipe));
				continue;
			}

			seq_printf(m, "Pipe %c stat:\t%08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
			intel_display_power_put(dev_priv, power_domain, pref);
		}

		seq_printf(m, "Master IER:\t%08x\n",
			   I915_READ(VLV_MASTER_IER));

		seq_printf(m, "Render IER:\t%08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Render IIR:\t%08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Render IMR:\t%08x\n",
			   I915_READ(GTIMR));

		seq_printf(m, "PM IER:\t\t%08x\n",
			   I915_READ(GEN6_PMIER));
		seq_printf(m, "PM IIR:\t\t%08x\n",
			   I915_READ(GEN6_PMIIR));
		seq_printf(m, "PM IMR:\t\t%08x\n",
			   I915_READ(GEN6_PMIMR));

		/* Hotplug/flip registers need the display powered up. */
		pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
		seq_printf(m, "Port hotplug:\t%08x\n",
			   I915_READ(PORT_HOTPLUG_EN));
		seq_printf(m, "DPFLIPSTAT:\t%08x\n",
			   I915_READ(VLV_DPFLIPSTAT));
		seq_printf(m, "DPINVGTT:\t%08x\n",
			   I915_READ(DPINVGTT));
		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);

	} else if (!HAS_PCH_SPLIT(dev_priv)) {
		/* gen2-4: single IER/IIR/IMR set plus per-pipe status. */
		seq_printf(m, "Interrupt enable:    %08x\n",
			   I915_READ(GEN2_IER));
		seq_printf(m, "Interrupt identity:  %08x\n",
			   I915_READ(GEN2_IIR));
		seq_printf(m, "Interrupt mask:      %08x\n",
			   I915_READ(GEN2_IMR));
		for_each_pipe(dev_priv, pipe)
			seq_printf(m, "Pipe %c stat:         %08x\n",
				   pipe_name(pipe),
				   I915_READ(PIPESTAT(pipe)));
	} else {
		/* Ironlake-style split: north/south display plus GT. */
		seq_printf(m, "North Display Interrupt enable:          %08x\n",
			   I915_READ(DEIER));
		seq_printf(m, "North Display Interrupt identity:        %08x\n",
			   I915_READ(DEIIR));
		seq_printf(m, "North Display Interrupt mask:            %08x\n",
			   I915_READ(DEIMR));
		seq_printf(m, "South Display Interrupt enable:          %08x\n",
			   I915_READ(SDEIER));
		seq_printf(m, "South Display Interrupt identity:        %08x\n",
			   I915_READ(SDEIIR));
		seq_printf(m, "South Display Interrupt mask:            %08x\n",
			   I915_READ(SDEIMR));
		seq_printf(m, "Graphics Interrupt enable:               %08x\n",
			   I915_READ(GTIER));
		seq_printf(m, "Graphics Interrupt identity:             %08x\n",
			   I915_READ(GTIIR));
		seq_printf(m, "Graphics Interrupt mask:         %08x\n",
			   I915_READ(GTIMR));
	}

	if (INTEL_GEN(dev_priv) >= 11) {
		/* gen11 has dedicated per-engine-class mask registers. */
		seq_printf(m, "RCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
		seq_printf(m, "BCS Intr Mask:\t %08x\n",
			   I915_READ(GEN11_BCS_RSVD_INTR_MASK));
		seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
		seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
		seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
			   I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
		seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUC_SG_INTR_MASK));
		seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
			   I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
		seq_printf(m, "Crypto Intr Mask:\t %08x\n",
			   I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
		seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
			   I915_READ(GEN11_GUNIT_CSME_INTR_MASK));

	} else if (INTEL_GEN(dev_priv) >= 6) {
		for_each_uabi_engine(engine, dev_priv) {
			seq_printf(m,
				   "Graphics Interrupt mask (%s):       %08x\n",
				   engine->name, ENGINE_READ(engine, RING_IMR));
		}
	}

	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

	return 0;
}
665
666 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
667 {
668         struct drm_i915_private *i915 = node_to_i915(m->private);
669         unsigned int i;
670
671         seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences);
672
673         rcu_read_lock();
674         for (i = 0; i < i915->ggtt.num_fences; i++) {
675                 struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
676                 struct i915_vma *vma = reg->vma;
677
678                 seq_printf(m, "Fence %d, pin count = %d, object = ",
679                            i, atomic_read(&reg->pin_count));
680                 if (!vma)
681                         seq_puts(m, "unused");
682                 else
683                         describe_obj(m, vma->obj);
684                 seq_putc(m, '\n');
685         }
686         rcu_read_unlock();
687
688         return 0;
689 }
690
691 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
692 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
693                               size_t count, loff_t *pos)
694 {
695         struct i915_gpu_state *error;
696         ssize_t ret;
697         void *buf;
698
699         error = file->private_data;
700         if (!error)
701                 return 0;
702
703         /* Bounce buffer required because of kernfs __user API convenience. */
704         buf = kmalloc(count, GFP_KERNEL);
705         if (!buf)
706                 return -ENOMEM;
707
708         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
709         if (ret <= 0)
710                 goto out;
711
712         if (!copy_to_user(ubuf, buf, ret))
713                 *pos += ret;
714         else
715                 ret = -EFAULT;
716
717 out:
718         kfree(buf);
719         return ret;
720 }
721
/*
 * Release hook shared by the gpu-info and error-state files: drop the
 * i915_gpu_state reference stashed in file->private_data by open().
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
727
/*
 * Open handler for i915_gpu_info: capture a fresh snapshot of the GPU
 * state (holding a runtime-pm wakeref for the duration of the capture)
 * and stash it in file->private_data for gpu_state_read().
 */
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct i915_gpu_state *gpu;
	intel_wakeref_t wakeref;

	gpu = NULL;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		gpu = i915_capture_gpu_state(i915);
	if (IS_ERR(gpu))
		return PTR_ERR(gpu);

	/* Reference is dropped by gpu_state_release() on close. */
	file->private_data = gpu;
	return 0;
}
743
/* Read-only debugfs file: a GPU state snapshot captured at open time. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
751
/*
 * Writing anything to i915_error_state clears the currently captured
 * error state; the written bytes themselves are ignored.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	/* No error was captured at open time: nothing to reset. */
	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	/* Report the whole write as consumed. */
	return cnt;
}
768
769 static int i915_error_state_open(struct inode *inode, struct file *file)
770 {
771         struct i915_gpu_state *error;
772
773         error = i915_first_error_state(inode->i_private);
774         if (IS_ERR(error))
775                 return PTR_ERR(error);
776
777         file->private_data  = error;
778         return 0;
779 }
780
/* Read/write debugfs file: read dumps the error state, write resets it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
789 #endif
790
791 static int i915_frequency_info(struct seq_file *m, void *unused)
792 {
793         struct drm_i915_private *dev_priv = node_to_i915(m->private);
794         struct intel_uncore *uncore = &dev_priv->uncore;
795         struct intel_rps *rps = &dev_priv->gt.rps;
796         intel_wakeref_t wakeref;
797         int ret = 0;
798
799         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
800
801         if (IS_GEN(dev_priv, 5)) {
802                 u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
803                 u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
804
805                 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
806                 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
807                 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
808                            MEMSTAT_VID_SHIFT);
809                 seq_printf(m, "Current P-state: %d\n",
810                            (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
811         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
812                 u32 rpmodectl, freq_sts;
813
814                 rpmodectl = I915_READ(GEN6_RP_CONTROL);
815                 seq_printf(m, "Video Turbo Mode: %s\n",
816                            yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
817                 seq_printf(m, "HW control enabled: %s\n",
818                            yesno(rpmodectl & GEN6_RP_ENABLE));
819                 seq_printf(m, "SW control enabled: %s\n",
820                            yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
821                                   GEN6_RP_MEDIA_SW_MODE));
822
823                 vlv_punit_get(dev_priv);
824                 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
825                 vlv_punit_put(dev_priv);
826
827                 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
828                 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
829
830                 seq_printf(m, "actual GPU freq: %d MHz\n",
831                            intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
832
833                 seq_printf(m, "current GPU freq: %d MHz\n",
834                            intel_gpu_freq(rps, rps->cur_freq));
835
836                 seq_printf(m, "max GPU freq: %d MHz\n",
837                            intel_gpu_freq(rps, rps->max_freq));
838
839                 seq_printf(m, "min GPU freq: %d MHz\n",
840                            intel_gpu_freq(rps, rps->min_freq));
841
842                 seq_printf(m, "idle GPU freq: %d MHz\n",
843                            intel_gpu_freq(rps, rps->idle_freq));
844
845                 seq_printf(m,
846                            "efficient (RPe) frequency: %d MHz\n",
847                            intel_gpu_freq(rps, rps->efficient_freq));
848         } else if (INTEL_GEN(dev_priv) >= 6) {
849                 u32 rp_state_limits;
850                 u32 gt_perf_status;
851                 u32 rp_state_cap;
852                 u32 rpmodectl, rpinclimit, rpdeclimit;
853                 u32 rpstat, cagf, reqf;
854                 u32 rpupei, rpcurup, rpprevup;
855                 u32 rpdownei, rpcurdown, rpprevdown;
856                 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
857                 int max_freq;
858
859                 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
860                 if (IS_GEN9_LP(dev_priv)) {
861                         rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
862                         gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
863                 } else {
864                         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
865                         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
866                 }
867
868                 /* RPSTAT1 is in the GT power well */
869                 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
870
871                 reqf = I915_READ(GEN6_RPNSWREQ);
872                 if (INTEL_GEN(dev_priv) >= 9)
873                         reqf >>= 23;
874                 else {
875                         reqf &= ~GEN6_TURBO_DISABLE;
876                         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
877                                 reqf >>= 24;
878                         else
879                                 reqf >>= 25;
880                 }
881                 reqf = intel_gpu_freq(rps, reqf);
882
883                 rpmodectl = I915_READ(GEN6_RP_CONTROL);
884                 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
885                 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
886
887                 rpstat = I915_READ(GEN6_RPSTAT1);
888                 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
889                 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
890                 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
891                 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
892                 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
893                 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
894                 cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
895
896                 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
897
898                 if (INTEL_GEN(dev_priv) >= 11) {
899                         pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
900                         pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
901                         /*
902                          * The equivalent to the PM ISR & IIR cannot be read
903                          * without affecting the current state of the system
904                          */
905                         pm_isr = 0;
906                         pm_iir = 0;
907                 } else if (INTEL_GEN(dev_priv) >= 8) {
908                         pm_ier = I915_READ(GEN8_GT_IER(2));
909                         pm_imr = I915_READ(GEN8_GT_IMR(2));
910                         pm_isr = I915_READ(GEN8_GT_ISR(2));
911                         pm_iir = I915_READ(GEN8_GT_IIR(2));
912                 } else {
913                         pm_ier = I915_READ(GEN6_PMIER);
914                         pm_imr = I915_READ(GEN6_PMIMR);
915                         pm_isr = I915_READ(GEN6_PMISR);
916                         pm_iir = I915_READ(GEN6_PMIIR);
917                 }
918                 pm_mask = I915_READ(GEN6_PMINTRMSK);
919
920                 seq_printf(m, "Video Turbo Mode: %s\n",
921                            yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
922                 seq_printf(m, "HW control enabled: %s\n",
923                            yesno(rpmodectl & GEN6_RP_ENABLE));
924                 seq_printf(m, "SW control enabled: %s\n",
925                            yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
926                                   GEN6_RP_MEDIA_SW_MODE));
927
928                 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
929                            pm_ier, pm_imr, pm_mask);
930                 if (INTEL_GEN(dev_priv) <= 10)
931                         seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
932                                    pm_isr, pm_iir);
933                 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
934                            rps->pm_intrmsk_mbz);
935                 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
936                 seq_printf(m, "Render p-state ratio: %d\n",
937                            (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
938                 seq_printf(m, "Render p-state VID: %d\n",
939                            gt_perf_status & 0xff);
940                 seq_printf(m, "Render p-state limit: %d\n",
941                            rp_state_limits & 0xff);
942                 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
943                 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
944                 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
945                 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
946                 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
947                 seq_printf(m, "CAGF: %dMHz\n", cagf);
948                 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
949                            rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
950                 seq_printf(m, "RP CUR UP: %d (%dus)\n",
951                            rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
952                 seq_printf(m, "RP PREV UP: %d (%dus)\n",
953                            rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
954                 seq_printf(m, "Up threshold: %d%%\n",
955                            rps->power.up_threshold);
956
957                 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
958                            rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
959                 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
960                            rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
961                 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
962                            rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
963                 seq_printf(m, "Down threshold: %d%%\n",
964                            rps->power.down_threshold);
965
966                 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
967                             rp_state_cap >> 16) & 0xff;
968                 max_freq *= (IS_GEN9_BC(dev_priv) ||
969                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
970                 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
971                            intel_gpu_freq(rps, max_freq));
972
973                 max_freq = (rp_state_cap & 0xff00) >> 8;
974                 max_freq *= (IS_GEN9_BC(dev_priv) ||
975                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
976                 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
977                            intel_gpu_freq(rps, max_freq));
978
979                 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
980                             rp_state_cap >> 0) & 0xff;
981                 max_freq *= (IS_GEN9_BC(dev_priv) ||
982                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
983                 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
984                            intel_gpu_freq(rps, max_freq));
985                 seq_printf(m, "Max overclocked frequency: %dMHz\n",
986                            intel_gpu_freq(rps, rps->max_freq));
987
988                 seq_printf(m, "Current freq: %d MHz\n",
989                            intel_gpu_freq(rps, rps->cur_freq));
990                 seq_printf(m, "Actual freq: %d MHz\n", cagf);
991                 seq_printf(m, "Idle freq: %d MHz\n",
992                            intel_gpu_freq(rps, rps->idle_freq));
993                 seq_printf(m, "Min freq: %d MHz\n",
994                            intel_gpu_freq(rps, rps->min_freq));
995                 seq_printf(m, "Boost freq: %d MHz\n",
996                            intel_gpu_freq(rps, rps->boost_freq));
997                 seq_printf(m, "Max freq: %d MHz\n",
998                            intel_gpu_freq(rps, rps->max_freq));
999                 seq_printf(m,
1000                            "efficient (RPe) frequency: %d MHz\n",
1001                            intel_gpu_freq(rps, rps->efficient_freq));
1002         } else {
1003                 seq_puts(m, "no P-state info available\n");
1004         }
1005
1006         seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1007         seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1008         seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1009
1010         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1011         return ret;
1012 }
1013
/*
 * ILK (gen5) flavour of the DRPC report: decode the MEMMODECTL boost /
 * P-state fields, the CRSTANDVID voltage IDs, and the current render
 * standby (RSx) state from RSTDBYCTL.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
	rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
	crstandvid = intel_uncore_read16(uncore, CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1071
/*
 * List the user-forcewake count and the wake_count of every forcewake
 * domain. Counts are sampled locklessly (READ_ONCE), so the output is
 * a point-in-time snapshot only.
 */
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	struct intel_uncore *uncore = &i915->uncore;
	struct intel_uncore_forcewake_domain *fw_domain;
	unsigned int tmp;

	seq_printf(m, "user.bypass_count = %u\n",
		   uncore->user_forcewake_count);

	for_each_fw_domain(fw_domain, uncore, tmp)
		seq_printf(m, "%s.wake_count = %u\n",
			   intel_uncore_forcewake_domain_to_str(fw_domain->id),
			   READ_ONCE(fw_domain->wake_count));

	return 0;
}
1089
/*
 * Print one RC6 residency counter, both as the raw register value and
 * converted to microseconds, holding a runtime-pm wakeref across the
 * register read.
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *i915 = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		seq_printf(m, "%s %u (%llu us)\n", title,
			   intel_uncore_read(&i915->uncore, reg),
			   intel_rc6_residency_us(&i915->gt.rc6, reg));
}
1102
/*
 * VLV/CHV flavour of the DRPC report: RC6 enable bits from
 * GEN6_RC_CONTROL, render/media power-well status from
 * VLV_GTLC_PW_STATUS, and the RC6 residency counters. Finishes by
 * appending the forcewake domain listing.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1124
/*
 * Gen6+ flavour of the DRPC report: decode GEN6_RC_CONTROL and the GT
 * core status into the current RC state, add gen9 power-gating status
 * where present, print the RC6 residency counters, and (gen6/7 only)
 * the RC6 voltage IDs read from the pcode. Finishes by appending the
 * forcewake domain listing.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw read (no forcewake bookkeeping), traced explicitly below. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7)
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids, NULL);

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1209
1210 static int i915_drpc_info(struct seq_file *m, void *unused)
1211 {
1212         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1213         intel_wakeref_t wakeref;
1214         int err = -ENODEV;
1215
1216         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
1217                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1218                         err = vlv_drpc_info(m);
1219                 else if (INTEL_GEN(dev_priv) >= 6)
1220                         err = gen6_drpc_info(m);
1221                 else
1222                         err = ironlake_drpc_info(m);
1223         }
1224
1225         return err;
1226 }
1227
1228 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1229 {
1230         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1231
1232         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1233                    dev_priv->fb_tracking.busy_bits);
1234
1235         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1236                    dev_priv->fb_tracking.flip_bits);
1237
1238         return 0;
1239 }
1240
1241 static int i915_fbc_status(struct seq_file *m, void *unused)
1242 {
1243         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1244         struct intel_fbc *fbc = &dev_priv->fbc;
1245         intel_wakeref_t wakeref;
1246
1247         if (!HAS_FBC(dev_priv))
1248                 return -ENODEV;
1249
1250         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1251         mutex_lock(&fbc->lock);
1252
1253         if (intel_fbc_is_active(dev_priv))
1254                 seq_puts(m, "FBC enabled\n");
1255         else
1256                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1257
1258         if (intel_fbc_is_active(dev_priv)) {
1259                 u32 mask;
1260
1261                 if (INTEL_GEN(dev_priv) >= 8)
1262                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1263                 else if (INTEL_GEN(dev_priv) >= 7)
1264                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1265                 else if (INTEL_GEN(dev_priv) >= 5)
1266                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1267                 else if (IS_G4X(dev_priv))
1268                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1269                 else
1270                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1271                                                         FBC_STAT_COMPRESSED);
1272
1273                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1274         }
1275
1276         mutex_unlock(&fbc->lock);
1277         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1278
1279         return 0;
1280 }
1281
1282 static int i915_fbc_false_color_get(void *data, u64 *val)
1283 {
1284         struct drm_i915_private *dev_priv = data;
1285
1286         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1287                 return -ENODEV;
1288
1289         *val = dev_priv->fbc.false_color;
1290
1291         return 0;
1292 }
1293
/*
 * Debugfs setter for the FBC false-color flag (gen7+ with FBC only):
 * toggles FBC_CTL_FALSE_COLOR in ILK_DPFC_CONTROL under fbc->lock and
 * records the requested value.
 */
static int i915_fbc_false_color_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 reg;

	if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
		return -ENODEV;

	mutex_lock(&dev_priv->fbc.lock);

	/* Read-modify-write so the other control bits are preserved. */
	reg = I915_READ(ILK_DPFC_CONTROL);
	dev_priv->fbc.false_color = val;

	I915_WRITE(ILK_DPFC_CONTROL, val ?
		   (reg | FBC_CTL_FALSE_COLOR) :
		   (reg & ~FBC_CTL_FALSE_COLOR));

	mutex_unlock(&dev_priv->fbc.lock);
	return 0;
}
1314
/* i915_fbc_false_color: simple u64 debugfs attribute wired to the get/set above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1318
1319 static int i915_ips_status(struct seq_file *m, void *unused)
1320 {
1321         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1322         intel_wakeref_t wakeref;
1323
1324         if (!HAS_IPS(dev_priv))
1325                 return -ENODEV;
1326
1327         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1328
1329         seq_printf(m, "Enabled by kernel parameter: %s\n",
1330                    yesno(i915_modparams.enable_ips));
1331
1332         if (INTEL_GEN(dev_priv) >= 8) {
1333                 seq_puts(m, "Currently: unknown\n");
1334         } else {
1335                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1336                         seq_puts(m, "Currently: enabled\n");
1337                 else
1338                         seq_puts(m, "Currently: disabled\n");
1339         }
1340
1341         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1342
1343         return 0;
1344 }
1345
/*
 * Report whether panel self-refresh is enabled. The enable bit lives in
 * a different register on each platform, hence the ladder below; gen9+
 * has no single global SR bit, so nothing is inspected there and the
 * report stays "disabled".
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1374
1375 static int i915_ring_freq_table(struct seq_file *m, void *unused)
1376 {
1377         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1378         struct intel_rps *rps = &dev_priv->gt.rps;
1379         unsigned int max_gpu_freq, min_gpu_freq;
1380         intel_wakeref_t wakeref;
1381         int gpu_freq, ia_freq;
1382
1383         if (!HAS_LLC(dev_priv))
1384                 return -ENODEV;
1385
1386         min_gpu_freq = rps->min_freq;
1387         max_gpu_freq = rps->max_freq;
1388         if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
1389                 /* Convert GT frequency to 50 HZ units */
1390                 min_gpu_freq /= GEN9_FREQ_SCALER;
1391                 max_gpu_freq /= GEN9_FREQ_SCALER;
1392         }
1393
1394         seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
1395
1396         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
1397         for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
1398                 ia_freq = gpu_freq;
1399                 sandybridge_pcode_read(dev_priv,
1400                                        GEN6_PCODE_READ_MIN_FREQ_TABLE,
1401                                        &ia_freq, NULL);
1402                 seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
1403                            intel_gpu_freq(rps,
1404                                           (gpu_freq *
1405                                            (IS_GEN9_BC(dev_priv) ||
1406                                             INTEL_GEN(dev_priv) >= 10 ?
1407                                             GEN9_FREQ_SCALER : 1))),
1408                            ((ia_freq >> 0) & 0xff) * 100,
1409                            ((ia_freq >> 8) & 0xff) * 100);
1410         }
1411         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
1412
1413         return 0;
1414 }
1415
1416 static int i915_opregion(struct seq_file *m, void *unused)
1417 {
1418         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1419
1420         if (opregion->header)
1421                 seq_write(m, opregion->header, OPREGION_SIZE);
1422
1423         return 0;
1424 }
1425
1426 static int i915_vbt(struct seq_file *m, void *unused)
1427 {
1428         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1429
1430         if (opregion->vbt)
1431                 seq_write(m, opregion->vbt, opregion->vbt_size);
1432
1433         return 0;
1434 }
1435
1436 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1437 {
1438         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1439         struct drm_device *dev = &dev_priv->drm;
1440         struct intel_framebuffer *fbdev_fb = NULL;
1441         struct drm_framebuffer *drm_fb;
1442
1443 #ifdef CONFIG_DRM_FBDEV_EMULATION
1444         if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1445                 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1446
1447                 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1448                            fbdev_fb->base.width,
1449                            fbdev_fb->base.height,
1450                            fbdev_fb->base.format->depth,
1451                            fbdev_fb->base.format->cpp[0] * 8,
1452                            fbdev_fb->base.modifier,
1453                            drm_framebuffer_read_refcount(&fbdev_fb->base));
1454                 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1455                 seq_putc(m, '\n');
1456         }
1457 #endif
1458
1459         mutex_lock(&dev->mode_config.fb_lock);
1460         drm_for_each_fb(drm_fb, dev) {
1461                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1462                 if (fb == fbdev_fb)
1463                         continue;
1464
1465                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1466                            fb->base.width,
1467                            fb->base.height,
1468                            fb->base.format->depth,
1469                            fb->base.format->cpp[0] * 8,
1470                            fb->base.modifier,
1471                            drm_framebuffer_read_refcount(&fb->base));
1472                 describe_obj(m, intel_fb_obj(&fb->base));
1473                 seq_putc(m, '\n');
1474         }
1475         mutex_unlock(&dev->mode_config.fb_lock);
1476
1477         return 0;
1478 }
1479
/*
 * Append a one-line summary of a context ringbuffer: remaining space and
 * the current head/tail/emit offsets.
 */
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
        seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
                   ring->space, ring->head, ring->tail, ring->emit);
}
1485
/*
 * Dump every GEM context: the owning process (if any), the remap_slice
 * flag, and for each pinned engine context its state object and ring.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);
        struct i915_gem_context *ctx, *cn;

        spin_lock(&i915->gem.contexts.lock);
        list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
                struct i915_gem_engines_iter it;
                struct intel_context *ce;

                /* Skip contexts whose last reference is already dropping. */
                if (!kref_get_unless_zero(&ctx->ref))
                        continue;

                /*
                 * Drop the list lock while printing; we hold our own
                 * reference on ctx, and the iteration cursor is re-validated
                 * with list_safe_reset_next() once the lock is retaken.
                 */
                spin_unlock(&i915->gem.contexts.lock);

                seq_puts(m, "HW context ");
                if (ctx->pid) {
                        struct task_struct *task;

                        task = get_pid_task(ctx->pid, PIDTYPE_PID);
                        if (task) {
                                seq_printf(m, "(%s [%d]) ",
                                           task->comm, task->pid);
                                put_task_struct(task);
                        }
                } else if (IS_ERR(ctx->file_priv)) {
                        /* Owner has gone; context is orphaned. */
                        seq_puts(m, "(deleted) ");
                } else {
                        seq_puts(m, "(kernel) ");
                }

                /* 'R' => remap_slice flag set, 'r' => clear. */
                seq_putc(m, ctx->remap_slice ? 'R' : 'r');
                seq_putc(m, '\n');

                for_each_gem_engine(ce,
                                    i915_gem_context_lock_engines(ctx), it) {
                        intel_context_lock_pinned(ce);
                        if (intel_context_is_pinned(ce)) {
                                seq_printf(m, "%s: ", ce->engine->name);
                                if (ce->state)
                                        describe_obj(m, ce->state->obj);
                                describe_ctx_ring(m, ce->ring);
                                seq_putc(m, '\n');
                        }
                        intel_context_unlock_pinned(ce);
                }
                i915_gem_context_unlock_engines(ctx);

                seq_putc(m, '\n');

                spin_lock(&i915->gem.contexts.lock);
                list_safe_reset_next(ctx, cn, link);
                i915_gem_context_put(ctx);
        }
        spin_unlock(&i915->gem.contexts.lock);

        return 0;
}
1544
1545 static const char *swizzle_string(unsigned swizzle)
1546 {
1547         switch (swizzle) {
1548         case I915_BIT_6_SWIZZLE_NONE:
1549                 return "none";
1550         case I915_BIT_6_SWIZZLE_9:
1551                 return "bit9";
1552         case I915_BIT_6_SWIZZLE_9_10:
1553                 return "bit9/bit10";
1554         case I915_BIT_6_SWIZZLE_9_11:
1555                 return "bit9/bit11";
1556         case I915_BIT_6_SWIZZLE_9_10_11:
1557                 return "bit9/bit10/bit11";
1558         case I915_BIT_6_SWIZZLE_9_17:
1559                 return "bit9/bit17";
1560         case I915_BIT_6_SWIZZLE_9_10_17:
1561                 return "bit9/bit10/bit17";
1562         case I915_BIT_6_SWIZZLE_UNKNOWN:
1563                 return "unknown";
1564         }
1565
1566         return "bug";
1567 }
1568
/*
 * Report the detected bit-6 swizzle mode for X- and Y-tiling, along with
 * the raw memory configuration registers the detection is derived from
 * (DCC/C?DRB3 on gen3/4, MAD_DIMM/TILECTL/ARB_MODE family on gen6+).
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_uncore *uncore = &dev_priv->uncore;
        intel_wakeref_t wakeref;

        /* The register reads below need the device awake. */
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_x));
        seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
                   swizzle_string(dev_priv->ggtt.bit_6_swizzle_y));

        if (IS_GEN_RANGE(dev_priv, 3, 4)) {
                seq_printf(m, "DDC = 0x%08x\n",
                           intel_uncore_read(uncore, DCC));
                seq_printf(m, "DDC2 = 0x%08x\n",
                           intel_uncore_read(uncore, DCC2));
                seq_printf(m, "C0DRB3 = 0x%04x\n",
                           intel_uncore_read16(uncore, C0DRB3));
                seq_printf(m, "C1DRB3 = 0x%04x\n",
                           intel_uncore_read16(uncore, C1DRB3));
        } else if (INTEL_GEN(dev_priv) >= 6) {
                seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C0));
                seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C1));
                seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
                           intel_uncore_read(uncore, MAD_DIMM_C2));
                seq_printf(m, "TILECTL = 0x%08x\n",
                           intel_uncore_read(uncore, TILECTL));
                /* The arbiter mode register moved on gen8. */
                if (INTEL_GEN(dev_priv) >= 8)
                        seq_printf(m, "GAMTARBMODE = 0x%08x\n",
                                   intel_uncore_read(uncore, GAMTARBMODE));
                else
                        seq_printf(m, "ARB_MODE = 0x%08x\n",
                                   intel_uncore_read(uncore, ARB_MODE));
                seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
                           intel_uncore_read(uncore, DISP_ARB_CTL));
        }

        if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
                seq_puts(m, "L-shaped memory detected\n");

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}
1617
1618 static const char *rps_power_to_str(unsigned int power)
1619 {
1620         static const char * const strings[] = {
1621                 [LOW_POWER] = "low power",
1622                 [BETWEEN] = "mixed",
1623                 [HIGH_POWER] = "high power",
1624         };
1625
1626         if (power >= ARRAY_SIZE(strings) || !strings[power])
1627                 return "unknown";
1628
1629         return strings[power];
1630 }
1631
/*
 * Dump RPS (render P-state) status: requested vs actual frequency, the
 * soft and hard frequency limits, boost bookkeeping, and — while the GT
 * is awake — the up/down autotuning counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct intel_rps *rps = &dev_priv->gt.rps;
        u32 act_freq = rps->cur_freq;
        intel_wakeref_t wakeref;

        /*
         * Only sample the live frequency if the device is already awake;
         * otherwise fall back to reporting the last requested frequency.
         */
        with_intel_runtime_pm_if_in_use(&dev_priv->runtime_pm, wakeref) {
                if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                        vlv_punit_get(dev_priv);
                        act_freq = vlv_punit_read(dev_priv,
                                                  PUNIT_REG_GPU_FREQ_STS);
                        vlv_punit_put(dev_priv);
                        act_freq = (act_freq >> 8) & 0xff;
                } else {
                        act_freq = intel_get_cagf(rps,
                                                  I915_READ(GEN6_RPSTAT1));
                }
        }

        seq_printf(m, "RPS enabled? %d\n", rps->enabled);
        seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
        seq_printf(m, "Boosts outstanding? %d\n",
                   atomic_read(&rps->num_waiters));
        seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
        seq_printf(m, "Frequency requested %d, actual %d\n",
                   intel_gpu_freq(rps, rps->cur_freq),
                   intel_gpu_freq(rps, act_freq));
        seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
                   intel_gpu_freq(rps, rps->min_freq),
                   intel_gpu_freq(rps, rps->min_freq_softlimit),
                   intel_gpu_freq(rps, rps->max_freq_softlimit),
                   intel_gpu_freq(rps, rps->max_freq));
        seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
                   intel_gpu_freq(rps, rps->idle_freq),
                   intel_gpu_freq(rps, rps->efficient_freq),
                   intel_gpu_freq(rps, rps->boost_freq));

        seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

        if (INTEL_GEN(dev_priv) >= 6 && rps->enabled && dev_priv->gt.awake) {
                u32 rpup, rpupei;
                u32 rpdown, rpdownei;

                /* Raw _FW reads: forcewake is held explicitly around them. */
                intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
                rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
                rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
                rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
                rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
                intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

                seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
                           rps_power_to_str(rps->power.mode));
                seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
                           rpup && rpupei ? 100 * rpup / rpupei : 0,
                           rps->power.up_threshold);
                seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
                           rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
                           rps->power.down_threshold);
        } else {
                seq_puts(m, "\nRPS Autotuning inactive\n");
        }

        return 0;
}
1697
1698 static int i915_llc(struct seq_file *m, void *data)
1699 {
1700         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1701         const bool edram = INTEL_GEN(dev_priv) > 8;
1702
1703         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
1704         seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
1705                    dev_priv->edram_size_mb);
1706
1707         return 0;
1708 }
1709
/*
 * Dump HuC firmware state: the cached firmware descriptor plus, with the
 * device awake, the live HUC_STATUS2 register.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct drm_printer p;

        if (!HAS_GT_UC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->gt.uc.huc.fw, &p);

        /* The register read needs a runtime-pm wakeref. */
        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
                seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

        return 0;
}
1727
/*
 * Dump GuC firmware state: the cached firmware descriptor plus, with the
 * device awake, a decoded GUC_STATUS and the 16 SOFT_SCRATCH registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct drm_printer p;

        if (!HAS_GT_UC(dev_priv))
                return -ENODEV;

        p = drm_seq_file_printer(m);
        intel_uc_fw_dump(&dev_priv->gt.uc.guc.fw, &p);

        /* The register reads below need a runtime-pm wakeref. */
        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
                u32 tmp = I915_READ(GUC_STATUS);
                u32 i;

                seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
                seq_printf(m, "\tBootrom status = 0x%x\n",
                           (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
                seq_printf(m, "\tuKernel status = 0x%x\n",
                           (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
                seq_printf(m, "\tMIA Core status = 0x%x\n",
                           (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
                seq_puts(m, "\nScratch registers:\n");
                for (i = 0; i < 16; i++) {
                        seq_printf(m, "\t%2d: \t0x%x\n",
                                   i, I915_READ(SOFT_SCRATCH(i)));
                }
        }

        return 0;
}
1760
1761 static const char *
1762 stringify_guc_log_type(enum guc_log_buffer_type type)
1763 {
1764         switch (type) {
1765         case GUC_ISR_LOG_BUFFER:
1766                 return "ISR";
1767         case GUC_DPC_LOG_BUFFER:
1768                 return "DPC";
1769         case GUC_CRASH_DUMP_LOG_BUFFER:
1770                 return "CRASH";
1771         default:
1772                 MISSING_CASE(type);
1773         }
1774
1775         return "";
1776 }
1777
1778 static void i915_guc_log_info(struct seq_file *m,
1779                               struct drm_i915_private *dev_priv)
1780 {
1781         struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
1782         enum guc_log_buffer_type type;
1783
1784         if (!intel_guc_log_relay_created(log)) {
1785                 seq_puts(m, "GuC log relay not created\n");
1786                 return;
1787         }
1788
1789         seq_puts(m, "GuC logging stats:\n");
1790
1791         seq_printf(m, "\tRelay full count: %u\n",
1792                    log->relay.full_count);
1793
1794         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
1795                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
1796                            stringify_guc_log_type(type),
1797                            log->stats[type].flush,
1798                            log->stats[type].sampled_overflow);
1799         }
1800 }
1801
1802 static int i915_guc_info(struct seq_file *m, void *data)
1803 {
1804         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1805         const struct intel_guc *guc = &dev_priv->gt.uc.guc;
1806         struct intel_guc_client *client = guc->execbuf_client;
1807
1808         if (!USES_GUC(dev_priv))
1809                 return -ENODEV;
1810
1811         i915_guc_log_info(m, dev_priv);
1812
1813         if (!USES_GUC_SUBMISSION(dev_priv))
1814                 return 0;
1815
1816         GEM_BUG_ON(!guc->execbuf_client);
1817
1818         seq_printf(m, "\nDoorbell map:\n");
1819         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
1820         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
1821
1822         seq_printf(m, "\nGuC execbuf client @ %p:\n", client);
1823         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
1824                    client->priority,
1825                    client->stage_id,
1826                    client->proc_desc_offset);
1827         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
1828                    client->doorbell_id, client->doorbell_offset);
1829         /* Add more as required ... */
1830
1831         return 0;
1832 }
1833
/*
 * Dump every active GuC stage descriptor: identity, doorbell and
 * workqueue state, plus the per-engine execlist context entries.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_guc *guc = &dev_priv->gt.uc.guc;
        struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
        int index;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return -ENODEV;

        for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
                struct intel_engine_cs *engine;

                /* Only descriptors marked active carry meaningful data. */
                if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
                        continue;

                seq_printf(m, "GuC stage descriptor %u:\n", index);
                seq_printf(m, "\tIndex: %u\n", desc->stage_id);
                seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
                seq_printf(m, "\tPriority: %d\n", desc->priority);
                seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
                seq_printf(m, "\tEngines used: 0x%x\n",
                           desc->engines_used);
                seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
                           desc->db_trigger_phy,
                           desc->db_trigger_cpu,
                           desc->db_trigger_uk);
                seq_printf(m, "\tProcess descriptor: 0x%x\n",
                           desc->process_desc);
                seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
                           desc->wq_addr, desc->wq_size);
                seq_putc(m, '\n');

                /* One execlist context entry per uabi engine. */
                for_each_uabi_engine(engine, dev_priv) {
                        u32 guc_engine_id = engine->guc_id;
                        struct guc_execlist_context *lrc =
                                                &desc->lrc[guc_engine_id];

                        seq_printf(m, "\t%s LRC:\n", engine->name);
                        seq_printf(m, "\t\tContext desc: 0x%x\n",
                                   lrc->context_desc);
                        seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
                        seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
                        seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
                        seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
                        seq_putc(m, '\n');
                }
        }

        return 0;
}
1885
1886 static int i915_guc_log_dump(struct seq_file *m, void *data)
1887 {
1888         struct drm_info_node *node = m->private;
1889         struct drm_i915_private *dev_priv = node_to_i915(node);
1890         bool dump_load_err = !!node->info_ent->data;
1891         struct drm_i915_gem_object *obj = NULL;
1892         u32 *log;
1893         int i = 0;
1894
1895         if (!HAS_GT_UC(dev_priv))
1896                 return -ENODEV;
1897
1898         if (dump_load_err)
1899                 obj = dev_priv->gt.uc.load_err_log;
1900         else if (dev_priv->gt.uc.guc.log.vma)
1901                 obj = dev_priv->gt.uc.guc.log.vma->obj;
1902
1903         if (!obj)
1904                 return 0;
1905
1906         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
1907         if (IS_ERR(log)) {
1908                 DRM_DEBUG("Failed to pin object\n");
1909                 seq_puts(m, "(log data unaccessible)\n");
1910                 return PTR_ERR(log);
1911         }
1912
1913         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
1914                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
1915                            *(log + i), *(log + i + 1),
1916                            *(log + i + 2), *(log + i + 3));
1917
1918         seq_putc(m, '\n');
1919
1920         i915_gem_object_unpin_map(obj);
1921
1922         return 0;
1923 }
1924
1925 static int i915_guc_log_level_get(void *data, u64 *val)
1926 {
1927         struct drm_i915_private *dev_priv = data;
1928
1929         if (!USES_GUC(dev_priv))
1930                 return -ENODEV;
1931
1932         *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
1933
1934         return 0;
1935 }
1936
1937 static int i915_guc_log_level_set(void *data, u64 val)
1938 {
1939         struct drm_i915_private *dev_priv = data;
1940
1941         if (!USES_GUC(dev_priv))
1942                 return -ENODEV;
1943
1944         return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
1945 }
1946
/* Read/write debugfs attribute exposing the GuC log level as a decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
                        i915_guc_log_level_get, i915_guc_log_level_set,
                        "%lld\n");
1950
1951 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
1952 {
1953         struct drm_i915_private *i915 = inode->i_private;
1954         struct intel_guc *guc = &i915->gt.uc.guc;
1955         struct intel_guc_log *log = &guc->log;
1956
1957         if (!intel_guc_is_running(guc))
1958                 return -ENODEV;
1959
1960         file->private_data = log;
1961
1962         return intel_guc_log_relay_open(log);
1963 }
1964
1965 static ssize_t
1966 i915_guc_log_relay_write(struct file *filp,
1967                          const char __user *ubuf,
1968                          size_t cnt,
1969                          loff_t *ppos)
1970 {
1971         struct intel_guc_log *log = filp->private_data;
1972         int val;
1973         int ret;
1974
1975         ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
1976         if (ret < 0)
1977                 return ret;
1978
1979         /*
1980          * Enable and start the guc log relay on value of 1.
1981          * Flush log relay for any other value.
1982          */
1983         if (val == 1)
1984                 ret = intel_guc_log_relay_start(log);
1985         else
1986                 intel_guc_log_relay_flush(log);
1987
1988         return ret ?: cnt;
1989 }
1990
1991 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
1992 {
1993         struct drm_i915_private *i915 = inode->i_private;
1994         struct intel_guc *guc = &i915->gt.uc.guc;
1995
1996         intel_guc_log_relay_close(&guc->log);
1997         return 0;
1998 }
1999
/*
 * debugfs plumbing for the GuC log relay: open checks the GuC is running,
 * write starts (1) or flushes (other values) the relay, release closes it.
 */
static const struct file_operations i915_guc_log_relay_fops = {
        .owner = THIS_MODULE,
        .open = i915_guc_log_relay_open,
        .write = i915_guc_log_relay_write,
        .release = i915_guc_log_relay_release,
};
2006
/*
 * Query the sink's PSR state over DPCD (DP_PSR_STATUS) and print it as
 * the raw value plus a decoded name.
 */
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
        u8 val;
        /* Names indexed by DP_PSR_SINK_STATE_MASK values. */
        static const char * const sink_status[] = {
                "inactive",
                "transition to active, capture and display",
                "active, display from RFB",
                "active, capture and display on sink device timings",
                "transition to inactive, capture and display, timing re-sync",
                "reserved",
                "reserved",
                "sink internal error",
        };
        struct drm_connector *connector = m->private;
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
        struct intel_dp *intel_dp =
                enc_to_intel_dp(&intel_attached_encoder(connector)->base);
        int ret;

        if (!CAN_PSR(dev_priv)) {
                seq_puts(m, "PSR Unsupported\n");
                return -ENODEV;
        }

        if (connector->status != connector_status_connected)
                return -ENODEV;

        /* A return of 1 means the single status byte was read. */
        ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);

        if (ret == 1) {
                const char *str = "unknown";

                val &= DP_PSR_SINK_STATE_MASK;
                if (val < ARRAY_SIZE(sink_status))
                        str = sink_status[val];
                seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
        } else {
                /*
                 * NOTE(review): a short read (ret == 0) reaches here and is
                 * returned as success with no output — confirm whether an
                 * explicit error code would be more appropriate.
                 */
                return ret;
        }

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2050
/*
 * Decode and print the live source-side PSR status register. PSR2 and
 * PSR1 use different status registers with different state encodings,
 * hence the two lookup tables.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
        u32 val, status_val;
        const char *status = "unknown";

        if (dev_priv->psr.psr2_enabled) {
                static const char * const live_status[] = {
                        "IDLE",
                        "CAPTURE",
                        "CAPTURE_FS",
                        "SLEEP",
                        "BUFON_FW",
                        "ML_UP",
                        "SU_STANDBY",
                        "FAST_SLEEP",
                        "DEEP_SLEEP",
                        "BUF_ON",
                        "TG_ON"
                };
                val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
                status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
                              EDP_PSR2_STATUS_STATE_SHIFT;
                if (status_val < ARRAY_SIZE(live_status))
                        status = live_status[status_val];
        } else {
                static const char * const live_status[] = {
                        "IDLE",
                        "SRDONACK",
                        "SRDENT",
                        "BUFOFF",
                        "BUFON",
                        "AUXACK",
                        "SRDOFFACK",
                        "SRDENT_ON",
                };
                val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
                status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
                              EDP_PSR_STATUS_STATE_SHIFT;
                if (status_val < ARRAY_SIZE(live_status))
                        status = live_status[status_val];
        }

        seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2096
/*
 * Dump eDP PSR state: sink support, the current PSR1/PSR2 mode, source
 * control/status registers, and (where available) the performance
 * counter, IRQ timestamps and PSR2 selective-update block counts.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_psr *psr = &dev_priv->psr;
        intel_wakeref_t wakeref;
        const char *status;
        bool enabled;
        u32 val;

        if (!HAS_PSR(dev_priv))
                return -ENODEV;

        seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
        if (psr->dp)
                seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
        seq_puts(m, "\n");

        if (!psr->sink_support)
                return 0;

        /* Keep the device awake and the PSR state stable while dumping. */
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
        mutex_lock(&psr->lock);

        if (psr->enabled)
                status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
        else
                status = "disabled";
        seq_printf(m, "PSR mode: %s\n", status);

        if (!psr->enabled) {
                seq_printf(m, "PSR sink not reliable: %s\n",
                           yesno(psr->sink_not_reliable));

                goto unlock;
        }

        /* PSR1 and PSR2 use different control registers. */
        if (psr->psr2_enabled) {
                val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
                enabled = val & EDP_PSR2_ENABLE;
        } else {
                val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
                enabled = val & EDP_PSR_ENABLE;
        }
        seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
                   enableddisabled(enabled), val);
        psr_source_status(dev_priv, m);
        seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
                   psr->busy_frontbuffer_bits);

        /*
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
                val &= EDP_PSR_PERF_CNT_MASK;
                seq_printf(m, "Performance counter: %u\n", val);
        }

        if (psr->debug & I915_PSR_DEBUG_IRQ) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           psr->last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
        }

        if (psr->psr2_enabled) {
                u32 su_frames_val[3];
                int frame;

                /*
                 * Reading all 3 registers before hand to minimize crossing a
                 * frame boundary between register reads
                 */
                for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
                        val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
                                                       frame));
                        su_frames_val[frame / 3] = val;
                }

                seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

                for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
                        u32 su_blocks;

                        /* Each register packs the SU count for 3 frames. */
                        su_blocks = su_frames_val[frame / 3] &
                                    PSR2_SU_STATUS_MASK(frame);
                        su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
                        seq_printf(m, "%d\t%d\n", frame, su_blocks);
                }
        }

unlock:
        mutex_unlock(&psr->lock);
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}
2193
2194 static int
2195 i915_edp_psr_debug_set(void *data, u64 val)
2196 {
2197         struct drm_i915_private *dev_priv = data;
2198         intel_wakeref_t wakeref;
2199         int ret;
2200
2201         if (!CAN_PSR(dev_priv))
2202                 return -ENODEV;
2203
2204         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2205
2206         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2207
2208         ret = intel_psr_debug_set(dev_priv, val);
2209
2210         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2211
2212         return ret;
2213 }
2214
2215 static int
2216 i915_edp_psr_debug_get(void *data, u64 *val)
2217 {
2218         struct drm_i915_private *dev_priv = data;
2219
2220         if (!CAN_PSR(dev_priv))
2221                 return -ENODEV;
2222
2223         *val = READ_ONCE(dev_priv->psr.debug);
2224         return 0;
2225 }
2226
/* debugfs file exposing the PSR debug mask as an unsigned decimal value. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
                        i915_edp_psr_debug_get, i915_edp_psr_debug_set,
                        "%llu\n");
2230
/*
 * Report GPU energy consumption in microjoules: read the package energy
 * status register and scale it by the RAPL energy unit from the MSR.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        unsigned long long power;
        intel_wakeref_t wakeref;
        u32 units;

        /* The RAPL power-unit MSR is only meaningful on gen6+. */
        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;

        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
                return -ENODEV;

        /* Bits 12:8 of MSR_RAPL_POWER_UNIT: energy unit is 1/2^units J. */
        units = (power & 0x1f00) >> 8;
        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
                power = I915_READ(MCH_SECP_NRG_STTS);

        power = (1000000 * power) >> units; /* convert to uJ */
        seq_printf(m, "%llu", power);

        return 0;
}
2253
2254 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2255 {
2256         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2257         struct pci_dev *pdev = dev_priv->drm.pdev;
2258
2259         if (!HAS_RUNTIME_PM(dev_priv))
2260                 seq_puts(m, "Runtime power management not supported\n");
2261
2262         seq_printf(m, "Runtime power status: %s\n",
2263                    enableddisabled(!dev_priv->power_domains.wakeref));
2264
2265         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2266         seq_printf(m, "IRQs disabled: %s\n",
2267                    yesno(!intel_irqs_enabled(dev_priv)));
2268 #ifdef CONFIG_PM
2269         seq_printf(m, "Usage count: %d\n",
2270                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2271 #else
2272         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2273 #endif
2274         seq_printf(m, "PCI device power state: %s [%d]\n",
2275                    pci_power_name(pdev->current_state),
2276                    pdev->current_state);
2277
2278         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2279                 struct drm_printer p = drm_seq_file_printer(m);
2280
2281                 print_intel_runtime_pm_wakeref(&dev_priv->runtime_pm, &p);
2282         }
2283
2284         return 0;
2285 }
2286
/*
 * Dump the use count of every power well and of each power domain it
 * serves, under the power_domains lock for a consistent snapshot.
 */
static int i915_power_domain_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
        int i;

        mutex_lock(&power_domains->lock);

        seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
        for (i = 0; i < power_domains->power_well_count; i++) {
                struct i915_power_well *power_well;
                enum intel_display_power_domain power_domain;

                power_well = &power_domains->power_wells[i];
                seq_printf(m, "%-25s %d\n", power_well->desc->name,
                           power_well->count);

                /* Then each domain this well serves, with its use count. */
                for_each_power_domain(power_domain, power_well->desc->domains)
                        seq_printf(m, "  %-23s %d\n",
                                 intel_display_power_domain_str(power_domain),
                                 power_domains->domain_use_count[power_domain]);
        }

        mutex_unlock(&power_domains->lock);

        return 0;
}
2314
/*
 * Dump DMC/CSR firmware state: load status, firmware path, version and the
 * DC-state transition counters (register layout varies per platform).
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct intel_csr *csr;
        i915_reg_t dc5_reg, dc6_reg = {};

        if (!HAS_CSR(dev_priv))
                return -ENODEV;

        csr = &dev_priv->csr;

        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);

        seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
        seq_printf(m, "path: %s\n", csr->fw_path);

        /* Without loaded firmware only the raw base registers are useful. */
        if (!csr->dmc_payload)
                goto out;

        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));

        if (INTEL_GEN(dev_priv) >= 12) {
                dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
                dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
                /*
                 * NOTE: DMC_DEBUG3 is a general purpose reg.
                 * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
                 * reg for DC3CO debugging and validation,
                 * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
                 */
                seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
        } else {
                dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
                                                 SKL_CSR_DC3_DC5_COUNT;
                if (!IS_GEN9_LP(dev_priv))
                        dc6_reg = SKL_CSR_DC5_DC6_COUNT;
        }

        seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
        /* dc6_reg stays zero-initialized when there is no DC6 counter. */
        if (dc6_reg.reg)
                seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));

out:
        seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
        seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
        seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);

        return 0;
}
2368
2369 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2370                                  struct drm_display_mode *mode)
2371 {
2372         int i;
2373
2374         for (i = 0; i < tabs; i++)
2375                 seq_putc(m, '\t');
2376
2377         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2378 }
2379
2380 static void intel_encoder_info(struct seq_file *m,
2381                                struct intel_crtc *intel_crtc,
2382                                struct intel_encoder *intel_encoder)
2383 {
2384         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2385         struct drm_device *dev = &dev_priv->drm;
2386         struct drm_crtc *crtc = &intel_crtc->base;
2387         struct intel_connector *intel_connector;
2388         struct drm_encoder *encoder;
2389
2390         encoder = &intel_encoder->base;
2391         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2392                    encoder->base.id, encoder->name);
2393         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2394                 struct drm_connector *connector = &intel_connector->base;
2395                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2396                            connector->base.id,
2397                            connector->name,
2398                            drm_get_connector_status_name(connector->status));
2399                 if (connector->status == connector_status_connected) {
2400                         struct drm_display_mode *mode = &crtc->mode;
2401                         seq_printf(m, ", mode:\n");
2402                         intel_seq_print_mode(m, 2, mode);
2403                 } else {
2404                         seq_putc(m, '\n');
2405                 }
2406         }
2407 }
2408
/*
 * Print the CRTC's primary plane framebuffer (if any) and then each
 * encoder attached to the CRTC.
 */
static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct drm_crtc *crtc = &intel_crtc->base;
        struct intel_encoder *intel_encoder;
        struct drm_plane_state *plane_state = crtc->primary->state;
        struct drm_framebuffer *fb = plane_state->fb;

        /* src_{x,y} are 16.16 fixed point, hence the >> 16 below. */
        if (fb)
                seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
                           fb->base.id, plane_state->src_x >> 16,
                           plane_state->src_y >> 16, fb->width, fb->height);
        else
                seq_puts(m, "\tprimary plane disabled\n");
        for_each_encoder_on_crtc(dev, crtc, intel_encoder)
                intel_encoder_info(m, intel_crtc, intel_encoder);
}
2427
2428 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2429 {
2430         struct drm_display_mode *mode = panel->fixed_mode;
2431
2432         seq_printf(m, "\tfixed mode:\n");
2433         intel_seq_print_mode(m, 2, mode);
2434 }
2435
2436 static void intel_hdcp_info(struct seq_file *m,
2437                             struct intel_connector *intel_connector)
2438 {
2439         bool hdcp_cap, hdcp2_cap;
2440
2441         hdcp_cap = intel_hdcp_capable(intel_connector);
2442         hdcp2_cap = intel_hdcp2_capable(intel_connector);
2443
2444         if (hdcp_cap)
2445                 seq_puts(m, "HDCP1.4 ");
2446         if (hdcp2_cap)
2447                 seq_puts(m, "HDCP2.2 ");
2448
2449         if (!hdcp_cap && !hdcp2_cap)
2450                 seq_puts(m, "None");
2451
2452         seq_puts(m, "\n");
2453 }
2454
/*
 * Print DP sink details: DPCD revision, audio support, eDP panel mode,
 * downstream port topology and (when a shim is wired up) HDCP capability.
 */
static void intel_dp_info(struct seq_file *m,
                          struct intel_connector *intel_connector)
{
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

        seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
        seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
        /* Only eDP connectors carry a fixed panel mode worth printing. */
        if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
                intel_panel_info(m, &intel_connector->panel);

        drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
                                &intel_dp->aux);
        if (intel_connector->hdcp.shim) {
                seq_puts(m, "\tHDCP version: ");
                intel_hdcp_info(m, intel_connector);
        }
}
2473
/* Print DP MST sink details: currently just audio support for the port. */
static void intel_dp_mst_info(struct seq_file *m,
                          struct intel_connector *intel_connector)
{
        struct intel_encoder *intel_encoder = intel_connector->encoder;
        struct intel_dp_mst_encoder *intel_mst =
                enc_to_mst(&intel_encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        /* Audio capability is a property of the MST port, not the encoder. */
        bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
                                        intel_connector->port);

        seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
2487
2488 static void intel_hdmi_info(struct seq_file *m,
2489                             struct intel_connector *intel_connector)
2490 {
2491         struct intel_encoder *intel_encoder = intel_connector->encoder;
2492         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2493
2494         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2495         if (intel_connector->hdcp.shim) {
2496                 seq_puts(m, "\tHDCP version: ");
2497                 intel_hdcp_info(m, intel_connector);
2498         }
2499 }
2500
/* Print LVDS panel details: just the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
{
        intel_panel_info(m, &intel_connector->panel);
}
2506
2507 static void intel_connector_info(struct seq_file *m,
2508                                  struct drm_connector *connector)
2509 {
2510         struct intel_connector *intel_connector = to_intel_connector(connector);
2511         struct intel_encoder *intel_encoder = intel_connector->encoder;
2512         struct drm_display_mode *mode;
2513
2514         seq_printf(m, "connector %d: type %s, status: %s\n",
2515                    connector->base.id, connector->name,
2516                    drm_get_connector_status_name(connector->status));
2517
2518         if (connector->status == connector_status_disconnected)
2519                 return;
2520
2521         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2522                    connector->display_info.width_mm,
2523                    connector->display_info.height_mm);
2524         seq_printf(m, "\tsubpixel order: %s\n",
2525                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2526         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2527
2528         if (!intel_encoder)
2529                 return;
2530
2531         switch (connector->connector_type) {
2532         case DRM_MODE_CONNECTOR_DisplayPort:
2533         case DRM_MODE_CONNECTOR_eDP:
2534                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2535                         intel_dp_mst_info(m, intel_connector);
2536                 else
2537                         intel_dp_info(m, intel_connector);
2538                 break;
2539         case DRM_MODE_CONNECTOR_LVDS:
2540                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2541                         intel_lvds_info(m, intel_connector);
2542                 break;
2543         case DRM_MODE_CONNECTOR_HDMIA:
2544                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2545                     intel_encoder->type == INTEL_OUTPUT_DDI)
2546                         intel_hdmi_info(m, intel_connector);
2547                 break;
2548         default:
2549                 break;
2550         }
2551
2552         seq_printf(m, "\tmodes:\n");
2553         list_for_each_entry(mode, &connector->modes, head)
2554                 intel_seq_print_mode(m, 2, mode);
2555 }
2556
/* Map a drm_plane_type to a short human-readable tag for the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
        switch (type) {
        case DRM_PLANE_TYPE_OVERLAY:
                return "OVL";
        case DRM_PLANE_TYPE_PRIMARY:
                return "PRI";
        case DRM_PLANE_TYPE_CURSOR:
                return "CUR";
        /*
         * Deliberately omitting default: to generate compiler warnings
         * when a new drm_plane_type gets added.
         */
        }

        return "unknown";
}
2574
/*
 * Format a rotation bitmask into @buf as a space-separated list of the
 * set DRM_MODE_ROTATE_/REFLECT_ bits followed by the raw hex value.
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, bufsize,
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
                 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
                 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
}
2591
2592 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2593 {
2594         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2595         struct drm_device *dev = &dev_priv->drm;
2596         struct intel_plane *intel_plane;
2597
2598         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2599                 struct drm_plane_state *state;
2600                 struct drm_plane *plane = &intel_plane->base;
2601                 struct drm_format_name_buf format_name;
2602                 struct drm_rect src, dst;
2603                 char rot_str[48];
2604
2605                 if (!plane->state) {
2606                         seq_puts(m, "plane->state is NULL!\n");
2607                         continue;
2608                 }
2609
2610                 state = plane->state;
2611
2612                 src = drm_plane_state_src(state);
2613                 dst = drm_plane_state_dest(state);
2614
2615                 if (state->fb) {
2616                         drm_get_format_name(state->fb->format->format,
2617                                             &format_name);
2618                 } else {
2619                         sprintf(format_name.str, "N/A");
2620                 }
2621
2622                 plane_rotation(rot_str, sizeof(rot_str), state->rotation);
2623
2624                 seq_printf(m, "\t--Plane id %d: type=%s, dst=" DRM_RECT_FMT ", src=" DRM_RECT_FP_FMT ", format=%s, rotation=%s\n",
2625                            plane->base.id,
2626                            plane_type(intel_plane->base.type),
2627                            DRM_RECT_ARG(&dst),
2628                            DRM_RECT_FP_ARG(&src),
2629                            format_name.str,
2630                            rot_str);
2631         }
2632 }
2633
/*
 * Print the pipe's scaler state: how many scalers exist, which users have
 * claimed one, and each scaler's in-use flag and mode bits.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct intel_crtc_state *pipe_config;
        int num_scalers = intel_crtc->num_scalers;
        int i;

        pipe_config = to_intel_crtc_state(intel_crtc->base.state);

        /* Not all platforms have a scaler */
        if (num_scalers) {
                seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
                           num_scalers,
                           pipe_config->scaler_state.scaler_users,
                           pipe_config->scaler_state.scaler_id);

                for (i = 0; i < num_scalers; i++) {
                        struct intel_scaler *sc =
                                        &pipe_config->scaler_state.scalers[i];

                        seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
                                   i, yesno(sc->in_use), sc->mode);
                }
                seq_puts(m, "\n");
        } else {
                seq_puts(m, "\tNo scalers available on this platform\n");
        }
}
2661
2662 static int i915_display_info(struct seq_file *m, void *unused)
2663 {
2664         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2665         struct drm_device *dev = &dev_priv->drm;
2666         struct intel_crtc *crtc;
2667         struct drm_connector *connector;
2668         struct drm_connector_list_iter conn_iter;
2669         intel_wakeref_t wakeref;
2670
2671         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2672
2673         seq_printf(m, "CRTC info\n");
2674         seq_printf(m, "---------\n");
2675         for_each_intel_crtc(dev, crtc) {
2676                 struct intel_crtc_state *pipe_config;
2677
2678                 drm_modeset_lock(&crtc->base.mutex, NULL);
2679                 pipe_config = to_intel_crtc_state(crtc->base.state);
2680
2681                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
2682                            crtc->base.base.id, pipe_name(crtc->pipe),
2683                            yesno(pipe_config->hw.active),
2684                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
2685                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
2686
2687                 if (pipe_config->hw.active) {
2688                         struct intel_plane *cursor =
2689                                 to_intel_plane(crtc->base.cursor);
2690
2691                         intel_crtc_info(m, crtc);
2692
2693                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
2694                                    yesno(cursor->base.state->visible),
2695                                    cursor->base.state->crtc_x,
2696                                    cursor->base.state->crtc_y,
2697                                    cursor->base.state->crtc_w,
2698                                    cursor->base.state->crtc_h,
2699                                    cursor->cursor.base);
2700                         intel_scaler_info(m, crtc);
2701                         intel_plane_info(m, crtc);
2702                 }
2703
2704                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
2705                            yesno(!crtc->cpu_fifo_underrun_disabled),
2706                            yesno(!crtc->pch_fifo_underrun_disabled));
2707                 drm_modeset_unlock(&crtc->base.mutex);
2708         }
2709
2710         seq_printf(m, "\n");
2711         seq_printf(m, "Connector info\n");
2712         seq_printf(m, "--------------\n");
2713         mutex_lock(&dev->mode_config.mutex);
2714         drm_connector_list_iter_begin(dev, &conn_iter);
2715         drm_for_each_connector_iter(connector, &conn_iter)
2716                 intel_connector_info(m, connector);
2717         drm_connector_list_iter_end(&conn_iter);
2718         mutex_unlock(&dev->mode_config.mutex);
2719
2720         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2721
2722         return 0;
2723 }
2724
2725 static int i915_engine_info(struct seq_file *m, void *unused)
2726 {
2727         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2728         struct intel_engine_cs *engine;
2729         intel_wakeref_t wakeref;
2730         struct drm_printer p;
2731
2732         wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
2733
2734         seq_printf(m, "GT awake? %s [%d]\n",
2735                    yesno(dev_priv->gt.awake),
2736                    atomic_read(&dev_priv->gt.wakeref.count));
2737         seq_printf(m, "CS timestamp frequency: %u kHz\n",
2738                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
2739
2740         p = drm_seq_file_printer(m);
2741         for_each_uabi_engine(engine, dev_priv)
2742                 intel_engine_dump(engine, &p, "%s\n", engine->name);
2743
2744         intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
2745
2746         return 0;
2747 }
2748
2749 static int i915_rcs_topology(struct seq_file *m, void *unused)
2750 {
2751         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2752         struct drm_printer p = drm_seq_file_printer(m);
2753
2754         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
2755
2756         return 0;
2757 }
2758
/* Report the shrinker parameters we registered with the MM core. */
static int i915_shrinker_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *i915 = node_to_i915(m->private);

        seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
        seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);

        return 0;
}
2768
2769 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2770 {
2771         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2772         struct drm_device *dev = &dev_priv->drm;
2773         int i;
2774
2775         drm_modeset_lock_all(dev);
2776         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
2777                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2778
2779                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
2780                            pll->info->id);
2781                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
2782                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
2783                 seq_printf(m, " tracked hardware state:\n");
2784                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
2785                 seq_printf(m, " dpll_md: 0x%08x\n",
2786                            pll->state.hw_state.dpll_md);
2787                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
2788                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
2789                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
2790                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
2791                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
2792                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
2793                            pll->state.hw_state.mg_refclkin_ctl);
2794                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
2795                            pll->state.hw_state.mg_clktop2_coreclkctl1);
2796                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
2797                            pll->state.hw_state.mg_clktop2_hsclkctl);
2798                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
2799                            pll->state.hw_state.mg_pll_div0);
2800                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
2801                            pll->state.hw_state.mg_pll_div1);
2802                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
2803                            pll->state.hw_state.mg_pll_lf);
2804                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
2805                            pll->state.hw_state.mg_pll_frac_lock);
2806                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
2807                            pll->state.hw_state.mg_pll_ssc);
2808                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
2809                            pll->state.hw_state.mg_pll_bias);
2810                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
2811                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
2812         }
2813         drm_modeset_unlock_all(dev);
2814
2815         return 0;
2816 }
2817
2818 static int i915_wa_registers(struct seq_file *m, void *unused)
2819 {
2820         struct drm_i915_private *i915 = node_to_i915(m->private);
2821         struct intel_engine_cs *engine;
2822
2823         for_each_uabi_engine(engine, i915) {
2824                 const struct i915_wa_list *wal = &engine->ctx_wa_list;
2825                 const struct i915_wa *wa;
2826                 unsigned int count;
2827
2828                 count = wal->count;
2829                 if (!count)
2830                         continue;
2831
2832                 seq_printf(m, "%s: Workarounds applied: %u\n",
2833                            engine->name, count);
2834
2835                 for (wa = wal->list; count--; wa++)
2836                         seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
2837                                    i915_mmio_reg_offset(wa->reg),
2838                                    wa->val, wa->mask);
2839
2840                 seq_printf(m, "\n");
2841         }
2842
2843         return 0;
2844 }
2845
2846 static int i915_ipc_status_show(struct seq_file *m, void *data)
2847 {
2848         struct drm_i915_private *dev_priv = m->private;
2849
2850         seq_printf(m, "Isochronous Priority Control: %s\n",
2851                         yesno(dev_priv->ipc_enabled));
2852         return 0;
2853 }
2854
2855 static int i915_ipc_status_open(struct inode *inode, struct file *file)
2856 {
2857         struct drm_i915_private *dev_priv = inode->i_private;
2858
2859         if (!HAS_IPC(dev_priv))
2860                 return -ENODEV;
2861
2862         return single_open(file, i915_ipc_status_show, dev_priv);
2863 }
2864
/*
 * Write handler for the IPC status file: parse a bool from userspace and
 * enable/disable Isochronous Priority Control accordingly.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        intel_wakeref_t wakeref;
        bool enable;
        int ret;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
                if (!dev_priv->ipc_enabled && enable)
                        DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
                /* Watermarks from the BIOS can no longer be trusted. */
                dev_priv->wm.distrust_bios_wm = true;
                dev_priv->ipc_enabled = enable;
                intel_enable_ipc(dev_priv);
        }

        return len;
}
2888
/* debugfs operations for i915_ipc_status: seq_file read plus bool write. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
2897
/*
 * Dump the DDB (display data buffer) allocation per pipe and plane:
 * start/end block and resulting size for each plane plus the cursor.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_entry *entry;
        struct intel_crtc *crtc;

        /* DDB allocation only exists on SKL-style (gen9+) hardware. */
        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        drm_modeset_lock_all(dev);

        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;

                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                for_each_plane_id_on_crtc(crtc, plane_id) {
                        entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
                        seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                /* The cursor has its own dedicated DDB entry. */
                entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
                seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
2936
/*
 * drrs_status_per_crtc - dump DRRS state for a single CRTC
 *
 * Prints the connector(s) routed to @intel_crtc, the DRRS support type
 * advertised by the VBT, and, when the current CRTC state has DRRS
 * enabled, the idleness DRRS state, busy frontbuffer bits and the
 * vertical refresh rate currently selected.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector whose state points at this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* DRRS support type as parsed from the VBT. */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex guards drrs->dp and the refresh-rate state. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		/* Report which refresh rate DRRS currently has selected. */
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3011
3012 static int i915_drrs_status(struct seq_file *m, void *unused)
3013 {
3014         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3015         struct drm_device *dev = &dev_priv->drm;
3016         struct intel_crtc *intel_crtc;
3017         int active_crtc_cnt = 0;
3018
3019         drm_modeset_lock_all(dev);
3020         for_each_intel_crtc(dev, intel_crtc) {
3021                 if (intel_crtc->base.state->active) {
3022                         active_crtc_cnt++;
3023                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3024
3025                         drrs_status_per_crtc(m, dev, intel_crtc);
3026                 }
3027         }
3028         drm_modeset_unlock_all(dev);
3029
3030         if (!active_crtc_cnt)
3031                 seq_puts(m, "No active crtc found\n");
3032
3033         return 0;
3034 }
3035
/*
 * i915_dp_mst_info - dump the DP MST topology of every MST-capable
 * source port.
 *
 * Walks all DisplayPort connectors, skipping MST branch connectors
 * (INTEL_OUTPUT_DP_MST) and ports without MST capability, and dumps
 * the topology tracked by each remaining port's MST manager.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *intel_encoder;
	struct intel_digital_port *intel_dig_port;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* We want the physical source port, not MST branch devices. */
		intel_encoder = intel_attached_encoder(connector);
		if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		if (!intel_dig_port->dp.can_mst)
			continue;

		seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
			   intel_dig_port->base.base.base.id,
			   intel_dig_port->base.base.name);
		drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
3067
/*
 * i915_displayport_test_active_write - debugfs write arming DP
 * compliance testing.
 *
 * Parses one decimal integer from userspace and applies it to every
 * connected SST DisplayPort connector: only an exact value of 1 arms
 * compliance.test_active, any other value disarms it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer for kstrtoint(). */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST branch connectors carry no compliance state. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3126
3127 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3128 {
3129         struct drm_i915_private *dev_priv = m->private;
3130         struct drm_device *dev = &dev_priv->drm;
3131         struct drm_connector *connector;
3132         struct drm_connector_list_iter conn_iter;
3133         struct intel_dp *intel_dp;
3134
3135         drm_connector_list_iter_begin(dev, &conn_iter);
3136         drm_for_each_connector_iter(connector, &conn_iter) {
3137                 struct intel_encoder *encoder;
3138
3139                 if (connector->connector_type !=
3140                     DRM_MODE_CONNECTOR_DisplayPort)
3141                         continue;
3142
3143                 encoder = to_intel_encoder(connector->encoder);
3144                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3145                         continue;
3146
3147                 if (encoder && connector->status == connector_status_connected) {
3148                         intel_dp = enc_to_intel_dp(&encoder->base);
3149                         if (intel_dp->compliance.test_active)
3150                                 seq_puts(m, "1");
3151                         else
3152                                 seq_puts(m, "0");
3153                 } else
3154                         seq_puts(m, "0");
3155         }
3156         drm_connector_list_iter_end(&conn_iter);
3157
3158         return 0;
3159 }
3160
/* debugfs open: wire i915_displayport_test_active_show into seq_file. */
static int i915_displayport_test_active_open(struct inode *inode,
					     struct file *file)
{
	return single_open(file, i915_displayport_test_active_show,
			   inode->i_private);
}

/* Read reports the compliance arming state; write arms/disarms it. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3176
/*
 * i915_displayport_test_data_show - dump stored DP compliance test data.
 *
 * For every connected SST DisplayPort connector prints the recorded
 * test payload: the EDID value for EDID-read tests, or the requested
 * hdisplay/vdisplay/bpc for video-pattern tests. Prints "0" for
 * connectors without a usable encoder or connected sink.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST branch connectors carry no compliance state. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			if (intel_dp->compliance.test_type ==
			    DP_TEST_LINK_EDID_READ)
				seq_printf(m, "%lx",
					   intel_dp->compliance.test_data.edid);
			else if (intel_dp->compliance.test_type ==
				 DP_TEST_LINK_VIDEO_PATTERN) {
				seq_printf(m, "hdisplay: %d\n",
					   intel_dp->compliance.test_data.hdisplay);
				seq_printf(m, "vdisplay: %d\n",
					   intel_dp->compliance.test_data.vdisplay);
				seq_printf(m, "bpc: %u\n",
					   intel_dp->compliance.test_data.bpc);
			}
		} else
			seq_puts(m, "0");
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3220
3221 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3222 {
3223         struct drm_i915_private *dev_priv = m->private;
3224         struct drm_device *dev = &dev_priv->drm;
3225         struct drm_connector *connector;
3226         struct drm_connector_list_iter conn_iter;
3227         struct intel_dp *intel_dp;
3228
3229         drm_connector_list_iter_begin(dev, &conn_iter);
3230         drm_for_each_connector_iter(connector, &conn_iter) {
3231                 struct intel_encoder *encoder;
3232
3233                 if (connector->connector_type !=
3234                     DRM_MODE_CONNECTOR_DisplayPort)
3235                         continue;
3236
3237                 encoder = to_intel_encoder(connector->encoder);
3238                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3239                         continue;
3240
3241                 if (encoder && connector->status == connector_status_connected) {
3242                         intel_dp = enc_to_intel_dp(&encoder->base);
3243                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3244                 } else
3245                         seq_puts(m, "0");
3246         }
3247         drm_connector_list_iter_end(&conn_iter);
3248
3249         return 0;
3250 }
3251 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3252
/*
 * wm_latency_show - print one watermark latency table.
 *
 * Each level is printed as the raw table value plus a microsecond
 * rendering: raw values are in 1us units on gen9+/vlv/chv/g4x and in
 * 0.5us units for WM1+ elsewhere, so they are scaled to tenths of a
 * microsecond before formatting.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* The number of valid WM levels is platform dependent. */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3292
3293 static int pri_wm_latency_show(struct seq_file *m, void *data)
3294 {
3295         struct drm_i915_private *dev_priv = m->private;
3296         const u16 *latencies;
3297
3298         if (INTEL_GEN(dev_priv) >= 9)
3299                 latencies = dev_priv->wm.skl_latency;
3300         else
3301                 latencies = dev_priv->wm.pri_latency;
3302
3303         wm_latency_show(m, latencies);
3304
3305         return 0;
3306 }
3307
3308 static int spr_wm_latency_show(struct seq_file *m, void *data)
3309 {
3310         struct drm_i915_private *dev_priv = m->private;
3311         const u16 *latencies;
3312
3313         if (INTEL_GEN(dev_priv) >= 9)
3314                 latencies = dev_priv->wm.skl_latency;
3315         else
3316                 latencies = dev_priv->wm.spr_latency;
3317
3318         wm_latency_show(m, latencies);
3319
3320         return 0;
3321 }
3322
3323 static int cur_wm_latency_show(struct seq_file *m, void *data)
3324 {
3325         struct drm_i915_private *dev_priv = m->private;
3326         const u16 *latencies;
3327
3328         if (INTEL_GEN(dev_priv) >= 9)
3329                 latencies = dev_priv->wm.skl_latency;
3330         else
3331                 latencies = dev_priv->wm.cur_latency;
3332
3333         wm_latency_show(m, latencies);
3334
3335         return 0;
3336 }
3337
/* Primary WM latency file only exists on g4x and gen5+. */
static int pri_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
		return -ENODEV;

	return single_open(file, pri_wm_latency_show, dev_priv);
}

/* Sprite WM latency file is not exposed on GMCH platforms. */
static int spr_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, spr_wm_latency_show, dev_priv);
}

/* Cursor WM latency file is not exposed on GMCH platforms. */
static int cur_wm_latency_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	if (HAS_GMCH(dev_priv))
		return -ENODEV;

	return single_open(file, cur_wm_latency_show, dev_priv);
}
3367
/*
 * wm_latency_write - common debugfs write path for the WM latency files.
 *
 * Parses up to 8 space-separated u16 values from userspace; exactly
 * num_levels values must be supplied or the write fails with -EINVAL.
 * The latency table is updated under the modeset locks.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	/* Must match the per-platform level count in wm_latency_show(). */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* Leave room for the terminating NUL added below. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3412
3413
3414 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3415                                     size_t len, loff_t *offp)
3416 {
3417         struct seq_file *m = file->private_data;
3418         struct drm_i915_private *dev_priv = m->private;
3419         u16 *latencies;
3420
3421         if (INTEL_GEN(dev_priv) >= 9)
3422                 latencies = dev_priv->wm.skl_latency;
3423         else
3424                 latencies = dev_priv->wm.pri_latency;
3425
3426         return wm_latency_write(file, ubuf, len, offp, latencies);
3427 }
3428
3429 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3430                                     size_t len, loff_t *offp)
3431 {
3432         struct seq_file *m = file->private_data;
3433         struct drm_i915_private *dev_priv = m->private;
3434         u16 *latencies;
3435
3436         if (INTEL_GEN(dev_priv) >= 9)
3437                 latencies = dev_priv->wm.skl_latency;
3438         else
3439                 latencies = dev_priv->wm.spr_latency;
3440
3441         return wm_latency_write(file, ubuf, len, offp, latencies);
3442 }
3443
3444 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3445                                     size_t len, loff_t *offp)
3446 {
3447         struct seq_file *m = file->private_data;
3448         struct drm_i915_private *dev_priv = m->private;
3449         u16 *latencies;
3450
3451         if (INTEL_GEN(dev_priv) >= 9)
3452                 latencies = dev_priv->wm.skl_latency;
3453         else
3454                 latencies = dev_priv->wm.cur_latency;
3455
3456         return wm_latency_write(file, ubuf, len, offp, latencies);
3457 }
3458
/* Read/write primary plane WM latencies. */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};

/* Read/write sprite plane WM latencies. */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};

/* Read/write cursor WM latencies. */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
3485
3486 static int
3487 i915_wedged_get(void *data, u64 *val)
3488 {
3489         struct drm_i915_private *i915 = data;
3490         int ret = intel_gt_terminally_wedged(&i915->gt);
3491
3492         switch (ret) {
3493         case -EIO:
3494                 *val = 1;
3495                 return 0;
3496         case 0:
3497                 *val = 0;
3498                 return 0;
3499         default:
3500                 return ret;
3501         }
3502 }
3503
/*
 * i915_wedged_set - manually trigger error handling for a set of engines.
 *
 * @val is the engine mask passed to intel_gt_handle_error(); the call
 * waits for any in-flight reset (I915_RESET_BACKOFF) to clear first.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* Flush any previous reset before applying for a new one */
	wait_event(i915->gt.reset.queue,
		   !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));

	intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
			      "Manually set wedged engine mask = %llx", val);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
			i915_wedged_get, i915_wedged_set,
			"%llu\n");
3521
/*
 * i915_perf_noa_delay_set - set the perf NOA programming delay.
 *
 * Rejects delays too large to express as a 32bit CS timestamp
 * difference at the current timestamp frequency.
 */
static int
i915_perf_noa_delay_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz;

	/*
	 * This would lead to infinite waits as we're doing timestamp
	 * difference on the CS with only 32bits.
	 */
	if (val > mul_u32_u32(U32_MAX, clk))
		return -EINVAL;

	atomic64_set(&i915->perf.noa_programming_delay, val);
	return 0;
}
3538
/* Read back the currently programmed perf NOA delay. */
static int
i915_perf_noa_delay_get(void *data, u64 *val)
{
	struct drm_i915_private *i915 = data;

	*val = atomic64_read(&i915->perf.noa_programming_delay);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops,
			i915_perf_noa_delay_get,
			i915_perf_noa_delay_set,
			"%llu\n");
3552
/*
 * i915_drop_caches control bits; each requests one cleanup action in
 * i915_drop_caches_set()/gt_drop_caches().
 */
#define DROP_UNBOUND	BIT(0) /* shrink unbound GEM objects */
#define DROP_BOUND	BIT(1) /* shrink bound GEM objects */
#define DROP_RETIRE	BIT(2) /* retire completed GT requests */
#define DROP_ACTIVE	BIT(3) /* wait for the GT to idle */
#define DROP_FREED	BIT(4) /* drain the freed-object worker */
#define DROP_SHRINK_ALL	BIT(5) /* run i915_gem_shrink_all() */
#define DROP_IDLE	BIT(6) /* also wait for GT powermanagement idle */
#define DROP_RESET_ACTIVE	BIT(7) /* wedge if not idle in time, then reset */
#define DROP_RESET_SEQNO	BIT(8) /* NOTE(review): no handler visible in this file - confirm */
#define DROP_RCU	BIT(9) /* rcu_barrier() before returning */
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO | \
		  DROP_RCU)
/* Reading the control returns the full mask of supported DROP_* bits. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
/*
 * gt_drop_caches - apply the GT-related DROP_* actions.
 *
 * For DROP_RESET_ACTIVE the GT is wedged if the engines fail to idle
 * within the timeout, and a full-engine reset is triggered afterwards
 * if the GT ended up terminally wedged.
 */
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
	int ret;

	/* Force the issue: wedge if the engines refuse to idle in time. */
	if (val & DROP_RESET_ACTIVE &&
	    wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
		intel_gt_set_wedged(gt);

	if (val & DROP_RETIRE)
		intel_gt_retire_requests(gt);

	if (val & (DROP_IDLE | DROP_ACTIVE)) {
		ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;
	}

	/* DROP_IDLE additionally waits for GT powermanagement to idle. */
	if (val & DROP_IDLE) {
		ret = intel_gt_pm_wait_for_idle(gt);
		if (ret)
			return ret;
	}

	/* Recover from a terminally wedged state via a full reset. */
	if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt))
		intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL);

	return 0;
}
3609
/*
 * i915_drop_caches_set - debugfs control to drop driver caches.
 *
 * Applies the GT-level actions first (reset/retire/idle), then runs
 * the shrinker actions inside an fs_reclaim section (as if under
 * memory pressure), then the RCU barrier and freed-object draining.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);

	ret = gt_drop_caches(&i915->gt, val);
	if (ret)
		return ret;

	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_RCU)
		rcu_barrier();

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
			i915_drop_caches_get, i915_drop_caches_set,
			"0x%08llx\n");
3646
/*
 * i915_cache_sharing_get - read the cache sharing policy field from
 * GEN6_MBCUNIT_SNPCR. Gen6/7 only.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;
	u32 snpcr = 0;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* Register access requires the device to be awake. */
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

	*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

	return 0;
}
3664
/*
 * i915_cache_sharing_set - write the cache sharing policy field (0-3)
 * into GEN6_MBCUNIT_SNPCR via read-modify-write. Gen6/7 only.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	intel_wakeref_t wakeref;

	if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
		return -ENODEV;

	/* The policy field only accepts values 0-3. */
	if (val > 3)
		return -EINVAL;

	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		u32 snpcr;

		/* Update the cache sharing policy here as well */
		snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
		snpcr &= ~GEN6_MBC_SNPCR_MASK;
		snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
		I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
	}

	return 0;
}
3690
/*
 * intel_sseu_copy_subslices - copy one slice's worth of subslice-mask
 * bytes from @sseu->subslice_mask into @to_mask at the same offset.
 */
static void
intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice,
			  u8 *to_mask)
{
	int offset = slice * sseu->ss_stride;

	memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
			i915_cache_sharing_get, i915_cache_sharing_set,
			"%llu\n");
3703
/*
 * cherryview_sseu_device_status - read back the runtime SSEU power
 * state on CHV from the subslice power-gating signal registers,
 * accumulating the enabled subslice mask and EU counts into @sseu.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		/* Single slice; each un-gated PG bit accounts for 2 EUs. */
		sseu->slice_mask = BIT(0);
		sseu->subslice_mask[0] |= BIT(ss);
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
3736
/*
 * gen10_sseu_device_status - read back the runtime SSEU power state on
 * gen10 from the slice/subslice power-gate ack registers, accumulating
 * the enabled slice/subslice masks and EU counts into @sseu.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserved
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the EU pairs in the A/B halves of each register. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (info->sseu.has_subslice_pg &&
			    !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* Each set ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3793
/*
 * gen9_sseu_device_status - read back the runtime SSEU state on gen9
 * @dev_priv: i915 device
 * @sseu: output; slice/subslice masks and EU counts are accumulated here
 *
 * Snapshots the slice/subslice/EU power-gating ACK registers and converts
 * them into an sseu_dev_info describing what is currently powered up.
 * NOTE(review): assumes the caller (i915_sseu_status) holds a runtime-pm
 * wakeref so the register reads are valid.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	/* Two EU ACK registers per slice: one for SS0/1, one for SS2/3. */
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	/* Snapshot all ACK registers first, then decode from the copies. */
	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* eu_mask[0] selects the even subslice's EU ACK bits, [1] the odd. */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/*
		 * On GEN9_BC subslices are not power-gated individually, so
		 * the static topology is the runtime topology; GEN9_LP
		 * derives the subslice mask from the ACK bits below instead.
		 */
		if (IS_GEN9_BC(dev_priv))
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;
			/* Byte index of subslice ss in the packed mask. */
			u8 ss_idx = s * info->sseu.ss_stride +
				    ss / BITS_PER_BYTE;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[ss_idx] |=
					BIT(ss % BITS_PER_BYTE);
			}

			/* Each ACK bit represents a pair of EUs. */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
3852
/*
 * broadwell_sseu_device_status - read back the runtime SSEU state on BDW
 * @dev_priv: i915 device
 * @sseu: output; filled from GEN8_GT_SLICE_INFO and the static topology
 *
 * BDW has no per-subslice/EU power-gate ACKs, so only the slice mask is
 * read from hardware; subslice and EU counts come from the static info,
 * minus the fused-off 7-EU subslices.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
					 struct sseu_dev_info *sseu)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
	int s;

	sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

	if (sseu->slice_mask) {
		sseu->eu_per_subslice = info->sseu.eu_per_subslice;
		for (s = 0; s < fls(sseu->slice_mask); s++)
			intel_sseu_copy_subslices(&info->sseu, s,
						  sseu->subslice_mask);
		sseu->eu_total = sseu->eu_per_subslice *
				 intel_sseu_subslice_total(sseu);

		/* subtract fused off EU(s) from enabled slice(s) */
		for (s = 0; s < fls(sseu->slice_mask); s++) {
			u8 subslice_7eu = info->sseu.subslice_7eu[s];

			sseu->eu_total -= hweight8(subslice_7eu);
		}
	}
}
3878
3879 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
3880                                  const struct sseu_dev_info *sseu)
3881 {
3882         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3883         const char *type = is_available_info ? "Available" : "Enabled";
3884         int s;
3885
3886         seq_printf(m, "  %s Slice Mask: %04x\n", type,
3887                    sseu->slice_mask);
3888         seq_printf(m, "  %s Slice Total: %u\n", type,
3889                    hweight8(sseu->slice_mask));
3890         seq_printf(m, "  %s Subslice Total: %u\n", type,
3891                    intel_sseu_subslice_total(sseu));
3892         for (s = 0; s < fls(sseu->slice_mask); s++) {
3893                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
3894                            s, intel_sseu_subslices_per_slice(sseu, s));
3895         }
3896         seq_printf(m, "  %s EU Total: %u\n", type,
3897                    sseu->eu_total);
3898         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
3899                    sseu->eu_per_subslice);
3900
3901         if (!is_available_info)
3902                 return;
3903
3904         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
3905         if (HAS_POOLED_EU(dev_priv))
3906                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
3907
3908         seq_printf(m, "  Has Slice Power Gating: %s\n",
3909                    yesno(sseu->has_slice_pg));
3910         seq_printf(m, "  Has Subslice Power Gating: %s\n",
3911                    yesno(sseu->has_subslice_pg));
3912         seq_printf(m, "  Has EU Power Gating: %s\n",
3913                    yesno(sseu->has_eu_pg));
3914 }
3915
/*
 * i915_sseu_status - debugfs show for i915_sseu_status
 *
 * Prints the static SSEU topology ("Device Info") followed by the current
 * runtime state ("Device Status"), read under a runtime-pm wakeref via the
 * platform-specific status helper.  Returns -ENODEV on pre-gen8 hardware,
 * which has no SSEU power gating to report.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
	struct sseu_dev_info sseu;
	intel_wakeref_t wakeref;

	if (INTEL_GEN(dev_priv) < 8)
		return -ENODEV;

	seq_puts(m, "SSEU Device Info\n");
	i915_print_sseu_info(m, true, &info->sseu);

	seq_puts(m, "SSEU Device Status\n");
	memset(&sseu, 0, sizeof(sseu));
	/* Seed the limits from the static info; masks stay zeroed. */
	intel_sseu_set_info(&sseu, info->sseu.max_slices,
			    info->sseu.max_subslices,
			    info->sseu.max_eus_per_subslice);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_sseu_device_status(dev_priv, &sseu);
		else if (IS_BROADWELL(dev_priv))
			broadwell_sseu_device_status(dev_priv, &sseu);
		else if (IS_GEN(dev_priv, 9))
			gen9_sseu_device_status(dev_priv, &sseu);
		else if (INTEL_GEN(dev_priv) >= 10)
			gen10_sseu_device_status(dev_priv, &sseu);
	}

	i915_print_sseu_info(m, false, &sseu);

	return 0;
}
3950
/*
 * i915_forcewake_open - hold GT power + forcewake while the file is open
 *
 * Bumps the user wakeref count, takes a GT PM reference and (gen6+) grabs
 * user forcewake.  i915_forcewake_release() undoes these in reverse order,
 * so the ordering here is deliberate.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
	struct drm_i915_private *i915 = inode->i_private;
	struct intel_gt *gt = &i915->gt;

	atomic_inc(&gt->user_wakeref);
	intel_gt_pm_get(gt);
	if (INTEL_GEN(i915) >= 6)
		intel_uncore_forcewake_user_get(gt->uncore);

	return 0;
}
3963
3964 static int i915_forcewake_release(struct inode *inode, struct file *file)
3965 {
3966         struct drm_i915_private *i915 = inode->i_private;
3967         struct intel_gt *gt = &i915->gt;
3968
3969         if (INTEL_GEN(i915) >= 6)
3970                 intel_uncore_forcewake_user_put(&i915->uncore);
3971         intel_gt_pm_put(gt);
3972         atomic_dec(&gt->user_wakeref);
3973
3974         return 0;
3975 }
3976
/* fops for i915_forcewake_user: open/close pin and release forcewake. */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
3982
/*
 * i915_hpd_storm_ctl_show - report the HPD storm threshold and whether a
 * storm is currently being handled (reenable work still pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;

	/* Synchronize with everything first in case there's been an HPD
	 * storm, but we haven't finished handling it in the kernel yet
	 */
	intel_synchronize_irq(dev_priv);
	flush_work(&dev_priv->hotplug.dig_port_work);
	flush_delayed_work(&dev_priv->hotplug.hotplug_work);

	seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
	seq_printf(m, "Detected: %s\n",
		   yesno(delayed_work_pending(&hotplug->reenable_work)));

	return 0;
}
4001
4002 static ssize_t i915_hpd_storm_ctl_write(struct file *file,
4003                                         const char __user *ubuf, size_t len,
4004                                         loff_t *offp)
4005 {
4006         struct seq_file *m = file->private_data;
4007         struct drm_i915_private *dev_priv = m->private;
4008         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4009         unsigned int new_threshold;
4010         int i;
4011         char *newline;
4012         char tmp[16];
4013
4014         if (len >= sizeof(tmp))
4015                 return -EINVAL;
4016
4017         if (copy_from_user(tmp, ubuf, len))
4018                 return -EFAULT;
4019
4020         tmp[len] = '\0';
4021
4022         /* Strip newline, if any */
4023         newline = strchr(tmp, '\n');
4024         if (newline)
4025                 *newline = '\0';
4026
4027         if (strcmp(tmp, "reset") == 0)
4028                 new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
4029         else if (kstrtouint(tmp, 10, &new_threshold) != 0)
4030                 return -EINVAL;
4031
4032         if (new_threshold > 0)
4033                 DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
4034                               new_threshold);
4035         else
4036                 DRM_DEBUG_KMS("Disabling HPD storm detection\n");
4037
4038         spin_lock_irq(&dev_priv->irq_lock);
4039         hotplug->hpd_storm_threshold = new_threshold;
4040         /* Reset the HPD storm stats so we don't accidentally trigger a storm */
4041         for_each_hpd_pin(i)
4042                 hotplug->stats[i].count = 0;
4043         spin_unlock_irq(&dev_priv->irq_lock);
4044
4045         /* Re-enable hpd immediately if we were in an irq storm */
4046         flush_delayed_work(&dev_priv->hotplug.reenable_work);
4047
4048         return len;
4049 }
4050
/* single_open wrapper binding the device private to the show callback. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4055
4056 static const struct file_operations i915_hpd_storm_ctl_fops = {
4057         .owner = THIS_MODULE,
4058         .open = i915_hpd_storm_ctl_open,
4059         .read = seq_read,
4060         .llseek = seq_lseek,
4061         .release = single_release,
4062         .write = i915_hpd_storm_ctl_write
4063 };
4064
/* Report whether HPD short-pulse storm detection is enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = m->private;

	seq_printf(m, "Enabled: %s\n",
		   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

	return 0;
}
4074
/* single_open wrapper binding the device private to the show callback. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_short_storm_ctl_show,
			   inode->i_private);
}
4081
/*
 * i915_hpd_short_storm_ctl_write - enable/disable HPD short-pulse storm
 * detection.
 * @ubuf: a boolean (as accepted by kstrtobool), or "reset" to restore the
 *        platform default (enabled unless the device has DP MST).
 *
 * Clears the per-pin storm stats under the irq lock and flushes the
 * reenable work, mirroring i915_hpd_storm_ctl_write().
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
					      const char __user *ubuf,
					      size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	char *newline;
	char tmp[16];
	int i;
	bool new_state;

	/* Need room for the terminating NUL. */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	/* Reset to the "default" state for this system */
	if (strcmp(tmp, "reset") == 0)
		new_state = !HAS_DP_MST(dev_priv);
	else if (kstrtobool(tmp, &new_state) != 0)
		return -EINVAL;

	DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
		      new_state ? "En" : "Dis");

	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_short_storm_enabled = new_state;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4128
/* fops for i915_hpd_short_storm_ctl. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_short_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_short_storm_ctl_write,
};
4137
/*
 * i915_drrs_ctl_set - manually enable (@val != 0) or disable (@val == 0)
 * DRRS on every active eDP output.
 *
 * For each CRTC: takes the CRTC lock, skips inactive or non-DRRS states,
 * waits for any pending commit's hw_done, then walks the connectors bound
 * to the CRTC and toggles DRRS on the attached eDP encoders.
 * Returns -ENODEV on pre-gen7 hardware; otherwise 0 or the first locking/
 * wait error.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* ret == 0 here, so the goto paths below unlock and continue. */
		if (!crtc_state->hw.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Don't race a commit in flight; wait for hw_done first. */
		commit = crtc_state->uapi.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Only connectors actually driven by this CRTC. */
			if (!(crtc_state->uapi.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4207
/*
 * i915_fifo_underrun_reset_write - re-arm FIFO underrun reporting
 * @ubuf: a boolean; anything falsy is a no-op returning @cnt.
 *
 * For every CRTC: takes the CRTC lock, waits for any pending commit
 * (hw_done then flip_done) and, if the CRTC is active, re-enables the
 * underrun interrupt that gets masked after the first report.  Finally
 * resets the FBC underrun state.  Returns @cnt, or the first error.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->uapi.commit;
		if (commit) {
			/* Wait for the commit to fully land before re-arming. */
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->hw.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		/* Unlock before bailing out so the mutex is never leaked. */
		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4261
/* Write-only control file; no show callback needed, hence simple_open. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4268
/*
 * Read-only debugfs entries, registered in bulk through
 * drm_debugfs_create_files() by i915_debugfs_register().
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/* data (void *)1 selects the load-error log in the shared handler */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4312
4313 static const struct i915_debugfs_files {
4314         const char *name;
4315         const struct file_operations *fops;
4316 } i915_debugfs_files[] = {
4317         {"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
4318         {"i915_wedged", &i915_wedged_fops},
4319         {"i915_cache_sharing", &i915_cache_sharing_fops},
4320         {"i915_gem_drop_caches", &i915_drop_caches_fops},
4321 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
4322         {"i915_error_state", &i915_error_state_fops},
4323         {"i915_gpu_info", &i915_gpu_info_fops},
4324 #endif
4325         {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
4326         {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
4327         {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
4328         {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
4329         {"i915_fbc_false_color", &i915_fbc_false_color_fops},
4330         {"i915_dp_test_data", &i915_displayport_test_data_fops},
4331         {"i915_dp_test_type", &i915_displayport_test_type_fops},
4332         {"i915_dp_test_active", &i915_displayport_test_active_fops},
4333         {"i915_guc_log_level", &i915_guc_log_level_fops},
4334         {"i915_guc_log_relay", &i915_guc_log_relay_fops},
4335         {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
4336         {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
4337         {"i915_ipc_status", &i915_ipc_status_fops},
4338         {"i915_drrs_ctl", &i915_drrs_ctl_fops},
4339         {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
4340 };
4341
/*
 * i915_debugfs_register - create all i915 debugfs files under the DRM
 * minor's debugfs root: the forcewake file, the custom-fops files from
 * i915_debugfs_files[], and the read-only i915_debugfs_list[] entries.
 * Returns the result of drm_debugfs_create_files().
 */
int i915_debugfs_register(struct drm_i915_private *dev_priv)
{
	struct drm_minor *minor = dev_priv->drm.primary;
	int i;

	debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
			    to_i915(minor->dev), &i915_forcewake_fops);

	for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
		debugfs_create_file(i915_debugfs_files[i].name,
				    S_IRUGO | S_IWUSR,
				    minor->debugfs_root,
				    to_i915(minor->dev),
				    i915_debugfs_files[i].fops);
	}

	return drm_debugfs_create_files(i915_debugfs_list,
					I915_DEBUGFS_ENTRIES,
					minor->debugfs_root, minor);
}
4362
/* One contiguous DPCD register range to dump in i915_dpcd_show(). */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4373
/*
 * DPCD ranges dumped by the i915_dpcd debugfs file.  Each range must fit
 * in the 16-byte read buffer used by i915_dpcd_show().
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4386
/*
 * i915_dpcd_show - dump the DPCD ranges listed in i915_dpcd_debug[] over
 * the connector's AUX channel.  eDP-only ranges are skipped on DP
 * connectors.  Returns -ENODEV when the connector is not connected; AUX
 * read failures are reported inline per range rather than aborting.
 */
static int i915_dpcd_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);
	u8 buf[16];
	ssize_t err;
	int i;

	if (connector->status != connector_status_connected)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
		const struct dpcd_block *b = &i915_dpcd_debug[i];
		/* .end (inclusive) wins over .size; bare offset means 1 byte. */
		size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);

		if (b->edp &&
		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
			continue;

		/* low tech for now */
		if (WARN_ON(size > sizeof(buf)))
			continue;

		err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
		if (err < 0)
			seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
		else
			/* print only the bytes actually read (err, not size) */
			seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4421
/*
 * i915_panel_show - dump the eDP panel power-sequencing and backlight
 * delays (in ms — TODO confirm unit against intel_dp PPS code) for a
 * connected panel.  Returns -ENODEV when not connected.
 */
static int i915_panel_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_dp *intel_dp =
		enc_to_intel_dp(&intel_attached_encoder(connector)->base);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	seq_printf(m, "Panel power up delay: %d\n",
		   intel_dp->panel_power_up_delay);
	seq_printf(m, "Panel power down delay: %d\n",
		   intel_dp->panel_power_down_delay);
	seq_printf(m, "Backlight on delay: %d\n",
		   intel_dp->backlight_on_delay);
	seq_printf(m, "Backlight off delay: %d\n",
		   intel_dp->backlight_off_delay);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_panel);
4443
/*
 * i915_hdcp_sink_capability_show - report the HDCP version supported by
 * the sink.  Returns -ENODEV when disconnected and -EINVAL when the
 * connector has no HDCP shim (HDCP unsupported on this connector).
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* HDCP is supported by connector */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	intel_hdcp_info(m, intel_connector);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4463
/*
 * i915_dsc_fec_support_show - report DSC enable/support and (for DP) FEC
 * sink support for the connector's current CRTC.
 *
 * Locking: takes connection_mutex and the CRTC mutex through an acquire
 * context; on -EDEADLK it backs off and retries the whole sequence
 * (try_again loop) rather than failing.  Returns -ENODEV when the
 * connector is disconnected or has no CRTC.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop everything and restart the loop. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP (non-eDP) concept only. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4522
/*
 * i915_dsc_fec_support_write - set intel_dp->force_dsc_en from a boolean
 * written by userspace (kstrtobool syntax).  Takes effect on the next
 * modeset; this function only records the flag.
 */
static ssize_t i915_dsc_fec_support_write(struct file *file,
					  const char __user *ubuf,
					  size_t len, loff_t *offp)
{
	bool dsc_enable = false;
	int ret;
	struct drm_connector *connector =
		((struct seq_file *)file->private_data)->private;
	struct intel_encoder *encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (len == 0)
		return 0;

	DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
			 len);

	ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
	if (ret < 0)
		return ret;

	DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
			 (dsc_enable) ? "true" : "false");
	intel_dp->force_dsc_en = dsc_enable;

	*offp += len;
	return len;
}
4551
4552 static int i915_dsc_fec_support_open(struct inode *inode,
4553                                      struct file *file)
4554 {
4555         return single_open(file, i915_dsc_fec_support_show,
4556                            inode->i_private);
4557 }
4558
4559 static const struct file_operations i915_dsc_fec_support_fops = {
4560         .owner = THIS_MODULE,
4561         .open = i915_dsc_fec_support_open,
4562         .read = seq_read,
4563         .llseek = seq_lseek,
4564         .release = single_release,
4565         .write = i915_dsc_fec_support_write
4566 };
4567
4568 /**
4569  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4570  * @connector: pointer to a registered drm_connector
4571  *
4572  * Cleanup will be done by drm_connector_unregister() through a call to
4573  * drm_debugfs_connector_remove().
4574  *
4575  * Returns 0 on success, negative error codes on error.
4576  */
4577 int i915_debugfs_connector_add(struct drm_connector *connector)
4578 {
4579         struct dentry *root = connector->debugfs_entry;
4580         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4581
4582         /* The connector must have been registered beforehands. */
4583         if (!root)
4584                 return -ENODEV;
4585
4586         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4587             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4588                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4589                                     connector, &i915_dpcd_fops);
4590
4591         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4592                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4593                                     connector, &i915_panel_fops);
4594                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4595                                     connector, &i915_psr_sink_status_fops);
4596         }
4597
4598         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4599             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4600             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4601                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4602                                     connector, &i915_hdcp_sink_capability_fops);
4603         }
4604
4605         if (INTEL_GEN(dev_priv) >= 10 &&
4606             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4607              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4608                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4609                                     connector, &i915_dsc_fec_support_fops);
4610
4611         return 0;
4612 }