]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/i915_debugfs.c
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
[linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sched/mm.h>
30 #include <linux/sort.h>
31
32 #include <drm/drm_debugfs.h>
33 #include <drm/drm_fourcc.h>
34
35 #include "i915_reset.h"
36 #include "intel_dp.h"
37 #include "intel_drv.h"
38 #include "intel_fbc.h"
39 #include "intel_guc_submission.h"
40 #include "intel_hdcp.h"
41 #include "intel_hdmi.h"
42 #include "intel_pm.h"
43 #include "intel_psr.h"
44
45 static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
46 {
47         return to_i915(node->minor->dev);
48 }
49
50 static int i915_capabilities(struct seq_file *m, void *data)
51 {
52         struct drm_i915_private *dev_priv = node_to_i915(m->private);
53         const struct intel_device_info *info = INTEL_INFO(dev_priv);
54         struct drm_printer p = drm_seq_file_printer(m);
55
56         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
57         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
58         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
59
60         intel_device_info_dump_flags(info, &p);
61         intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
62         intel_driver_caps_print(&dev_priv->caps, &p);
63
64         kernel_param_lock(THIS_MODULE);
65         i915_params_dump(&i915_modparams, &p);
66         kernel_param_unlock(THIS_MODULE);
67
68         return 0;
69 }
70
/* '*' while the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
75
76 static char get_pin_flag(struct drm_i915_gem_object *obj)
77 {
78         return obj->pin_global ? 'p' : ' ';
79 }
80
81 static char get_tiling_flag(struct drm_i915_gem_object *obj)
82 {
83         switch (i915_gem_object_get_tiling(obj)) {
84         default:
85         case I915_TILING_NONE: return ' ';
86         case I915_TILING_X: return 'X';
87         case I915_TILING_Y: return 'Y';
88         }
89 }
90
91 static char get_global_flag(struct drm_i915_gem_object *obj)
92 {
93         return obj->userfault_count ? 'g' : ' ';
94 }
95
96 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
97 {
98         return obj->mm.mapping ? 'M' : ' ';
99 }
100
101 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
102 {
103         u64 size = 0;
104         struct i915_vma *vma;
105
106         for_each_ggtt_vma(vma, obj) {
107                 if (drm_mm_node_allocated(&vma->node))
108                         size += vma->node.size;
109         }
110
111         return size;
112 }
113
114 static const char *
115 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
116 {
117         size_t x = 0;
118
119         switch (page_sizes) {
120         case 0:
121                 return "";
122         case I915_GTT_PAGE_SIZE_4K:
123                 return "4K";
124         case I915_GTT_PAGE_SIZE_64K:
125                 return "64K";
126         case I915_GTT_PAGE_SIZE_2M:
127                 return "2M";
128         default:
129                 if (!buf)
130                         return "M";
131
132                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
133                         x += snprintf(buf + x, len - x, "2M, ");
134                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
135                         x += snprintf(buf + x, len - x, "64K, ");
136                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
137                         x += snprintf(buf + x, len - x, "4K, ");
138                 buf[x-2] = '\0';
139
140                 return buf;
141         }
142 }
143
/*
 * describe_obj - print a one-line summary (plus per-vma detail) of a GEM
 * object to the debugfs seq_file.
 *
 * Emits the object's state flags, size, read/write domains, cache level,
 * every vma with allocated GTT/ppGTT space (including GGTT view details
 * and fence state), the stolen offset, last write engine and frontbuffer
 * bits.  Caller must hold dev->struct_mutex (asserted below).
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count how many of the object's vmas are currently pinned. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe every vma that actually has drm_mm space allocated. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT bindings carry a view type (normal/partial/rotated). */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
231
232 static int obj_rank_by_stolen(const void *A, const void *B)
233 {
234         const struct drm_i915_gem_object *a =
235                 *(const struct drm_i915_gem_object **)A;
236         const struct drm_i915_gem_object *b =
237                 *(const struct drm_i915_gem_object **)B;
238
239         if (a->stolen->start < b->stolen->start)
240                 return -1;
241         if (a->stolen->start > b->stolen->start)
242                 return 1;
243         return 0;
244 }
245
/*
 * i915_gem_stolen_list_info - debugfs dump of every object backed by
 * stolen memory, sorted by offset within the stolen region.
 *
 * Object pointers are snapshotted from the bound/unbound lists under
 * mm.obj_lock; struct_mutex is held across the whole walk so the objects
 * stay valid for the later describe_obj() calls.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/* Upper bound on how many objects we may collect; sampled before
	 * taking any locks, so the lists may grow afterwards — the
	 * "count == total" checks below keep us inside the array. */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects occupy no GTT space; only their size is counted. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
308
/*
 * Per-client (or kernel-internal) accumulation of GEM object usage,
 * filled by per_file_stats() and reported via print_file_stats().
 */
struct file_stats {
	struct i915_address_space *vm;	/* ppGTT to filter non-GGTT vmas against (may be NULL) */
	unsigned long count;		/* objects visited */
	u64 total, unbound;		/* total bytes / bytes with no GTT binding at all */
	u64 global, shared;		/* bytes bound in the GGTT / exported (flink name or dma-buf) */
	u64 active, inactive;		/* bound bytes still busy on the GPU vs idle */
	u64 closed;			/* bound bytes whose vma is already closed */
};
317
/*
 * per_file_stats - iterator callback accumulating one object's sizes
 * into a struct file_stats.
 *
 * @id:   idr handle (unused; present for idr_for_each's signature)
 * @ptr:  the drm_i915_gem_object to account
 * @data: the struct file_stats to update
 *
 * Always returns 0 so idr_for_each() visits every object.
 * Caller must hold dev->struct_mutex (asserted below).
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Only count ppGTT vmas that belong to the address
			 * space this stats pass was asked about. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
355
/*
 * print_file_stats - emit one summary line for an accumulated
 * struct file_stats, or nothing if no objects were counted.
 * A macro (not a function) so that @stats can be passed by value
 * directly from the caller's local aggregate.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
369
/*
 * print_batch_pool_stats - accumulate every object in every engine's
 * batch-buffer pool buckets and print them as one "[k]batch pool" line.
 */
static void print_batch_pool_stats(struct seq_file *m,
				   struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	struct file_stats stats = {};
	enum intel_engine_id id;
	int j;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				/* id argument is unused by per_file_stats */
				per_file_stats(0, obj, &stats);
		}
	}

	print_file_stats(m, "[k]batch pool", stats);
}
390
/*
 * print_context_stats - per-context GEM usage report.
 *
 * Kernel-owned context state (logical ring context images and ringbuffer
 * objects) is accumulated into a single "[k]contexts" line; each context
 * with an open file also gets its own line, tallied over that file's
 * object handles and labelled with the owning task's comm.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_context *ce;

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			/* Resolve the context's (or file's) pid to a task
			 * name under RCU; the task may already be gone. */
			rcu_read_lock();
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
429
/*
 * i915_gem_object_info - debugfs overview of global GEM memory usage.
 *
 * Walks the unbound and bound object lists under mm.obj_lock to total
 * object counts/sizes by category (unbound, bound, purgeable, mapped,
 * huge-paged, display-pinned), prints GGTT totals and supported page
 * sizes, then takes struct_mutex for the batch-pool and per-context
 * breakdowns.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* First pass: objects with no GTT binding at all. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Second pass: bound objects; size/count restart, purgeable/mapped/
	 * huge totals keep accumulating across both lists. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	/* The batch-pool and context walks need struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
533
534 static int i915_gem_gtt_info(struct seq_file *m, void *data)
535 {
536         struct drm_info_node *node = m->private;
537         struct drm_i915_private *dev_priv = node_to_i915(node);
538         struct drm_device *dev = &dev_priv->drm;
539         struct drm_i915_gem_object **objects;
540         struct drm_i915_gem_object *obj;
541         u64 total_obj_size, total_gtt_size;
542         unsigned long nobject, n;
543         int count, ret;
544
545         nobject = READ_ONCE(dev_priv->mm.object_count);
546         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
547         if (!objects)
548                 return -ENOMEM;
549
550         ret = mutex_lock_interruptible(&dev->struct_mutex);
551         if (ret)
552                 return ret;
553
554         count = 0;
555         spin_lock(&dev_priv->mm.obj_lock);
556         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
557                 objects[count++] = obj;
558                 if (count == nobject)
559                         break;
560         }
561         spin_unlock(&dev_priv->mm.obj_lock);
562
563         total_obj_size = total_gtt_size = 0;
564         for (n = 0;  n < count; n++) {
565                 obj = objects[n];
566
567                 seq_puts(m, "   ");
568                 describe_obj(m, obj);
569                 seq_putc(m, '\n');
570                 total_obj_size += obj->base.size;
571                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
572         }
573
574         mutex_unlock(&dev->struct_mutex);
575
576         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
577                    count, total_obj_size, total_gtt_size);
578         kvfree(objects);
579
580         return 0;
581 }
582
/*
 * i915_gem_batch_pool_info - debugfs dump of each engine's batch-buffer
 * pool: a per-bucket object count followed by a description of every
 * object, and a grand total.  Holds struct_mutex across the walk.
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First walk: just count the bucket's objects so the
			 * header line can precede the descriptions. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
627
/*
 * gen8_display_interrupt_info - print the gen8+ display interrupt
 * registers: per-pipe IMR/IIR/IER, then the port, misc and PCU
 * mask/identity/enable registers.
 *
 * Shared by the gen8 and gen11 branches of i915_interrupt_info().
 * Pipes whose power well is down are reported as such and skipped,
 * since reading their registers would fault.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		/* Only read pipe registers if the pipe's power well is up. */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
679
680 static int i915_interrupt_info(struct seq_file *m, void *data)
681 {
682         struct drm_i915_private *dev_priv = node_to_i915(m->private);
683         struct intel_engine_cs *engine;
684         enum intel_engine_id id;
685         intel_wakeref_t wakeref;
686         int i, pipe;
687
688         wakeref = intel_runtime_pm_get(dev_priv);
689
690         if (IS_CHERRYVIEW(dev_priv)) {
691                 intel_wakeref_t pref;
692
693                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
694                            I915_READ(GEN8_MASTER_IRQ));
695
696                 seq_printf(m, "Display IER:\t%08x\n",
697                            I915_READ(VLV_IER));
698                 seq_printf(m, "Display IIR:\t%08x\n",
699                            I915_READ(VLV_IIR));
700                 seq_printf(m, "Display IIR_RW:\t%08x\n",
701                            I915_READ(VLV_IIR_RW));
702                 seq_printf(m, "Display IMR:\t%08x\n",
703                            I915_READ(VLV_IMR));
704                 for_each_pipe(dev_priv, pipe) {
705                         enum intel_display_power_domain power_domain;
706
707                         power_domain = POWER_DOMAIN_PIPE(pipe);
708                         pref = intel_display_power_get_if_enabled(dev_priv,
709                                                                   power_domain);
710                         if (!pref) {
711                                 seq_printf(m, "Pipe %c power disabled\n",
712                                            pipe_name(pipe));
713                                 continue;
714                         }
715
716                         seq_printf(m, "Pipe %c stat:\t%08x\n",
717                                    pipe_name(pipe),
718                                    I915_READ(PIPESTAT(pipe)));
719
720                         intel_display_power_put(dev_priv, power_domain, pref);
721                 }
722
723                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
724                 seq_printf(m, "Port hotplug:\t%08x\n",
725                            I915_READ(PORT_HOTPLUG_EN));
726                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
727                            I915_READ(VLV_DPFLIPSTAT));
728                 seq_printf(m, "DPINVGTT:\t%08x\n",
729                            I915_READ(DPINVGTT));
730                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
731
732                 for (i = 0; i < 4; i++) {
733                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
734                                    i, I915_READ(GEN8_GT_IMR(i)));
735                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
736                                    i, I915_READ(GEN8_GT_IIR(i)));
737                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
738                                    i, I915_READ(GEN8_GT_IER(i)));
739                 }
740
741                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
742                            I915_READ(GEN8_PCU_IMR));
743                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
744                            I915_READ(GEN8_PCU_IIR));
745                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
746                            I915_READ(GEN8_PCU_IER));
747         } else if (INTEL_GEN(dev_priv) >= 11) {
748                 seq_printf(m, "Master Interrupt Control:  %08x\n",
749                            I915_READ(GEN11_GFX_MSTR_IRQ));
750
751                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
752                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
753                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
754                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
755                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
756                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
757                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
758                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
759                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
760                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
761                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
762                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
763
764                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
765                            I915_READ(GEN11_DISPLAY_INT_CTL));
766
767                 gen8_display_interrupt_info(m);
768         } else if (INTEL_GEN(dev_priv) >= 8) {
769                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
770                            I915_READ(GEN8_MASTER_IRQ));
771
772                 for (i = 0; i < 4; i++) {
773                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
774                                    i, I915_READ(GEN8_GT_IMR(i)));
775                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
776                                    i, I915_READ(GEN8_GT_IIR(i)));
777                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
778                                    i, I915_READ(GEN8_GT_IER(i)));
779                 }
780
781                 gen8_display_interrupt_info(m);
782         } else if (IS_VALLEYVIEW(dev_priv)) {
783                 seq_printf(m, "Display IER:\t%08x\n",
784                            I915_READ(VLV_IER));
785                 seq_printf(m, "Display IIR:\t%08x\n",
786                            I915_READ(VLV_IIR));
787                 seq_printf(m, "Display IIR_RW:\t%08x\n",
788                            I915_READ(VLV_IIR_RW));
789                 seq_printf(m, "Display IMR:\t%08x\n",
790                            I915_READ(VLV_IMR));
791                 for_each_pipe(dev_priv, pipe) {
792                         enum intel_display_power_domain power_domain;
793                         intel_wakeref_t pref;
794
795                         power_domain = POWER_DOMAIN_PIPE(pipe);
796                         pref = intel_display_power_get_if_enabled(dev_priv,
797                                                                   power_domain);
798                         if (!pref) {
799                                 seq_printf(m, "Pipe %c power disabled\n",
800                                            pipe_name(pipe));
801                                 continue;
802                         }
803
804                         seq_printf(m, "Pipe %c stat:\t%08x\n",
805                                    pipe_name(pipe),
806                                    I915_READ(PIPESTAT(pipe)));
807                         intel_display_power_put(dev_priv, power_domain, pref);
808                 }
809
810                 seq_printf(m, "Master IER:\t%08x\n",
811                            I915_READ(VLV_MASTER_IER));
812
813                 seq_printf(m, "Render IER:\t%08x\n",
814                            I915_READ(GTIER));
815                 seq_printf(m, "Render IIR:\t%08x\n",
816                            I915_READ(GTIIR));
817                 seq_printf(m, "Render IMR:\t%08x\n",
818                            I915_READ(GTIMR));
819
820                 seq_printf(m, "PM IER:\t\t%08x\n",
821                            I915_READ(GEN6_PMIER));
822                 seq_printf(m, "PM IIR:\t\t%08x\n",
823                            I915_READ(GEN6_PMIIR));
824                 seq_printf(m, "PM IMR:\t\t%08x\n",
825                            I915_READ(GEN6_PMIMR));
826
827                 seq_printf(m, "Port hotplug:\t%08x\n",
828                            I915_READ(PORT_HOTPLUG_EN));
829                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
830                            I915_READ(VLV_DPFLIPSTAT));
831                 seq_printf(m, "DPINVGTT:\t%08x\n",
832                            I915_READ(DPINVGTT));
833
834         } else if (!HAS_PCH_SPLIT(dev_priv)) {
835                 seq_printf(m, "Interrupt enable:    %08x\n",
836                            I915_READ(GEN2_IER));
837                 seq_printf(m, "Interrupt identity:  %08x\n",
838                            I915_READ(GEN2_IIR));
839                 seq_printf(m, "Interrupt mask:      %08x\n",
840                            I915_READ(GEN2_IMR));
841                 for_each_pipe(dev_priv, pipe)
842                         seq_printf(m, "Pipe %c stat:         %08x\n",
843                                    pipe_name(pipe),
844                                    I915_READ(PIPESTAT(pipe)));
845         } else {
846                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
847                            I915_READ(DEIER));
848                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
849                            I915_READ(DEIIR));
850                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
851                            I915_READ(DEIMR));
852                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
853                            I915_READ(SDEIER));
854                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
855                            I915_READ(SDEIIR));
856                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
857                            I915_READ(SDEIMR));
858                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
859                            I915_READ(GTIER));
860                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
861                            I915_READ(GTIIR));
862                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
863                            I915_READ(GTIMR));
864         }
865
866         if (INTEL_GEN(dev_priv) >= 11) {
867                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
868                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
869                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
870                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
871                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
872                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
873                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
874                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
875                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
876                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
877                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
878                            I915_READ(GEN11_GUC_SG_INTR_MASK));
879                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
880                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
881                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
882                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
883                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
885
886         } else if (INTEL_GEN(dev_priv) >= 6) {
887                 for_each_engine(engine, dev_priv, id) {
888                         seq_printf(m,
889                                    "Graphics Interrupt mask (%s):       %08x\n",
890                                    engine->name, ENGINE_READ(engine, RING_IMR));
891                 }
892         }
893
894         intel_runtime_pm_put(dev_priv, wakeref);
895
896         return 0;
897 }
898
899 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
900 {
901         struct drm_i915_private *dev_priv = node_to_i915(m->private);
902         struct drm_device *dev = &dev_priv->drm;
903         int i, ret;
904
905         ret = mutex_lock_interruptible(&dev->struct_mutex);
906         if (ret)
907                 return ret;
908
909         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
910         for (i = 0; i < dev_priv->num_fence_regs; i++) {
911                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
912
913                 seq_printf(m, "Fence %d, pin count = %d, object = ",
914                            i, dev_priv->fence_regs[i].pin_count);
915                 if (!vma)
916                         seq_puts(m, "unused");
917                 else
918                         describe_obj(m, vma->obj);
919                 seq_putc(m, '\n');
920         }
921
922         mutex_unlock(&dev->struct_mutex);
923         return 0;
924 }
925
926 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
927 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
928                               size_t count, loff_t *pos)
929 {
930         struct i915_gpu_state *error;
931         ssize_t ret;
932         void *buf;
933
934         error = file->private_data;
935         if (!error)
936                 return 0;
937
938         /* Bounce buffer required because of kernfs __user API convenience. */
939         buf = kmalloc(count, GFP_KERNEL);
940         if (!buf)
941                 return -ENOMEM;
942
943         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
944         if (ret <= 0)
945                 goto out;
946
947         if (!copy_to_user(ubuf, buf, ret))
948                 *pos += ret;
949         else
950                 ret = -EFAULT;
951
952 out:
953         kfree(buf);
954         return ret;
955 }
956
/*
 * release() handler for the GPU state files: drop the reference taken
 * at open time.  private_data may be NULL when no error state existed
 * at open — presumably i915_gpu_state_put() tolerates NULL; verify
 * against its definition.
 */
static int gpu_state_release(struct inode *inode, struct file *file)
{
	i915_gpu_state_put(file->private_data);
	return 0;
}
962
963 static int i915_gpu_info_open(struct inode *inode, struct file *file)
964 {
965         struct drm_i915_private *i915 = inode->i_private;
966         struct i915_gpu_state *gpu;
967         intel_wakeref_t wakeref;
968
969         gpu = NULL;
970         with_intel_runtime_pm(i915, wakeref)
971                 gpu = i915_capture_gpu_state(i915);
972         if (IS_ERR(gpu))
973                 return PTR_ERR(gpu);
974
975         file->private_data = gpu;
976         return 0;
977 }
978
/*
 * i915_gpu_info: captures a live GPU state snapshot on open and lets
 * userspace stream it out with ordinary read()/lseek().
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
986
987 static ssize_t
988 i915_error_state_write(struct file *filp,
989                        const char __user *ubuf,
990                        size_t cnt,
991                        loff_t *ppos)
992 {
993         struct i915_gpu_state *error = filp->private_data;
994
995         if (!error)
996                 return 0;
997
998         DRM_DEBUG_DRIVER("Resetting error state\n");
999         i915_reset_error_state(error->i915);
1000
1001         return cnt;
1002 }
1003
1004 static int i915_error_state_open(struct inode *inode, struct file *file)
1005 {
1006         struct i915_gpu_state *error;
1007
1008         error = i915_first_error_state(inode->i_private);
1009         if (IS_ERR(error))
1010                 return PTR_ERR(error);
1011
1012         file->private_data  = error;
1013         return 0;
1014 }
1015
/*
 * i915_error_state: read() streams the first captured error state,
 * write() clears it so a subsequent hang can be captured.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1024 #endif
1025
1026 static int i915_frequency_info(struct seq_file *m, void *unused)
1027 {
1028         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1029         struct intel_rps *rps = &dev_priv->gt_pm.rps;
1030         intel_wakeref_t wakeref;
1031         int ret = 0;
1032
1033         wakeref = intel_runtime_pm_get(dev_priv);
1034
1035         if (IS_GEN(dev_priv, 5)) {
1036                 u16 rgvswctl = I915_READ16(MEMSWCTL);
1037                 u16 rgvstat = I915_READ16(MEMSTAT_ILK);
1038
1039                 seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
1040                 seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
1041                 seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
1042                            MEMSTAT_VID_SHIFT);
1043                 seq_printf(m, "Current P-state: %d\n",
1044                            (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
1045         } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1046                 u32 rpmodectl, freq_sts;
1047
1048                 mutex_lock(&dev_priv->pcu_lock);
1049
1050                 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1051                 seq_printf(m, "Video Turbo Mode: %s\n",
1052                            yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1053                 seq_printf(m, "HW control enabled: %s\n",
1054                            yesno(rpmodectl & GEN6_RP_ENABLE));
1055                 seq_printf(m, "SW control enabled: %s\n",
1056                            yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1057                                   GEN6_RP_MEDIA_SW_MODE));
1058
1059                 freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
1060                 seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
1061                 seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
1062
1063                 seq_printf(m, "actual GPU freq: %d MHz\n",
1064                            intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
1065
1066                 seq_printf(m, "current GPU freq: %d MHz\n",
1067                            intel_gpu_freq(dev_priv, rps->cur_freq));
1068
1069                 seq_printf(m, "max GPU freq: %d MHz\n",
1070                            intel_gpu_freq(dev_priv, rps->max_freq));
1071
1072                 seq_printf(m, "min GPU freq: %d MHz\n",
1073                            intel_gpu_freq(dev_priv, rps->min_freq));
1074
1075                 seq_printf(m, "idle GPU freq: %d MHz\n",
1076                            intel_gpu_freq(dev_priv, rps->idle_freq));
1077
1078                 seq_printf(m,
1079                            "efficient (RPe) frequency: %d MHz\n",
1080                            intel_gpu_freq(dev_priv, rps->efficient_freq));
1081                 mutex_unlock(&dev_priv->pcu_lock);
1082         } else if (INTEL_GEN(dev_priv) >= 6) {
1083                 u32 rp_state_limits;
1084                 u32 gt_perf_status;
1085                 u32 rp_state_cap;
1086                 u32 rpmodectl, rpinclimit, rpdeclimit;
1087                 u32 rpstat, cagf, reqf;
1088                 u32 rpupei, rpcurup, rpprevup;
1089                 u32 rpdownei, rpcurdown, rpprevdown;
1090                 u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
1091                 int max_freq;
1092
1093                 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
1094                 if (IS_GEN9_LP(dev_priv)) {
1095                         rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
1096                         gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
1097                 } else {
1098                         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
1099                         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
1100                 }
1101
1102                 /* RPSTAT1 is in the GT power well */
1103                 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
1104
1105                 reqf = I915_READ(GEN6_RPNSWREQ);
1106                 if (INTEL_GEN(dev_priv) >= 9)
1107                         reqf >>= 23;
1108                 else {
1109                         reqf &= ~GEN6_TURBO_DISABLE;
1110                         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1111                                 reqf >>= 24;
1112                         else
1113                                 reqf >>= 25;
1114                 }
1115                 reqf = intel_gpu_freq(dev_priv, reqf);
1116
1117                 rpmodectl = I915_READ(GEN6_RP_CONTROL);
1118                 rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
1119                 rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);
1120
1121                 rpstat = I915_READ(GEN6_RPSTAT1);
1122                 rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
1123                 rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
1124                 rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
1125                 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
1126                 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
1127                 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
1128                 cagf = intel_gpu_freq(dev_priv,
1129                                       intel_get_cagf(dev_priv, rpstat));
1130
1131                 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
1132
1133                 if (INTEL_GEN(dev_priv) >= 11) {
1134                         pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
1135                         pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
1136                         /*
1137                          * The equivalent to the PM ISR & IIR cannot be read
1138                          * without affecting the current state of the system
1139                          */
1140                         pm_isr = 0;
1141                         pm_iir = 0;
1142                 } else if (INTEL_GEN(dev_priv) >= 8) {
1143                         pm_ier = I915_READ(GEN8_GT_IER(2));
1144                         pm_imr = I915_READ(GEN8_GT_IMR(2));
1145                         pm_isr = I915_READ(GEN8_GT_ISR(2));
1146                         pm_iir = I915_READ(GEN8_GT_IIR(2));
1147                 } else {
1148                         pm_ier = I915_READ(GEN6_PMIER);
1149                         pm_imr = I915_READ(GEN6_PMIMR);
1150                         pm_isr = I915_READ(GEN6_PMISR);
1151                         pm_iir = I915_READ(GEN6_PMIIR);
1152                 }
1153                 pm_mask = I915_READ(GEN6_PMINTRMSK);
1154
1155                 seq_printf(m, "Video Turbo Mode: %s\n",
1156                            yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
1157                 seq_printf(m, "HW control enabled: %s\n",
1158                            yesno(rpmodectl & GEN6_RP_ENABLE));
1159                 seq_printf(m, "SW control enabled: %s\n",
1160                            yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
1161                                   GEN6_RP_MEDIA_SW_MODE));
1162
1163                 seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
1164                            pm_ier, pm_imr, pm_mask);
1165                 if (INTEL_GEN(dev_priv) <= 10)
1166                         seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
1167                                    pm_isr, pm_iir);
1168                 seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
1169                            rps->pm_intrmsk_mbz);
1170                 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1171                 seq_printf(m, "Render p-state ratio: %d\n",
1172                            (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
1173                 seq_printf(m, "Render p-state VID: %d\n",
1174                            gt_perf_status & 0xff);
1175                 seq_printf(m, "Render p-state limit: %d\n",
1176                            rp_state_limits & 0xff);
1177                 seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
1178                 seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
1179                 seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
1180                 seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
1181                 seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
1182                 seq_printf(m, "CAGF: %dMHz\n", cagf);
1183                 seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
1184                            rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
1185                 seq_printf(m, "RP CUR UP: %d (%dus)\n",
1186                            rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
1187                 seq_printf(m, "RP PREV UP: %d (%dus)\n",
1188                            rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
1189                 seq_printf(m, "Up threshold: %d%%\n",
1190                            rps->power.up_threshold);
1191
1192                 seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
1193                            rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
1194                 seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
1195                            rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
1196                 seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
1197                            rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
1198                 seq_printf(m, "Down threshold: %d%%\n",
1199                            rps->power.down_threshold);
1200
1201                 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
1202                             rp_state_cap >> 16) & 0xff;
1203                 max_freq *= (IS_GEN9_BC(dev_priv) ||
1204                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1205                 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
1206                            intel_gpu_freq(dev_priv, max_freq));
1207
1208                 max_freq = (rp_state_cap & 0xff00) >> 8;
1209                 max_freq *= (IS_GEN9_BC(dev_priv) ||
1210                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1211                 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
1212                            intel_gpu_freq(dev_priv, max_freq));
1213
1214                 max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
1215                             rp_state_cap >> 0) & 0xff;
1216                 max_freq *= (IS_GEN9_BC(dev_priv) ||
1217                              INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
1218                 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
1219                            intel_gpu_freq(dev_priv, max_freq));
1220                 seq_printf(m, "Max overclocked frequency: %dMHz\n",
1221                            intel_gpu_freq(dev_priv, rps->max_freq));
1222
1223                 seq_printf(m, "Current freq: %d MHz\n",
1224                            intel_gpu_freq(dev_priv, rps->cur_freq));
1225                 seq_printf(m, "Actual freq: %d MHz\n", cagf);
1226                 seq_printf(m, "Idle freq: %d MHz\n",
1227                            intel_gpu_freq(dev_priv, rps->idle_freq));
1228                 seq_printf(m, "Min freq: %d MHz\n",
1229                            intel_gpu_freq(dev_priv, rps->min_freq));
1230                 seq_printf(m, "Boost freq: %d MHz\n",
1231                            intel_gpu_freq(dev_priv, rps->boost_freq));
1232                 seq_printf(m, "Max freq: %d MHz\n",
1233                            intel_gpu_freq(dev_priv, rps->max_freq));
1234                 seq_printf(m,
1235                            "efficient (RPe) frequency: %d MHz\n",
1236                            intel_gpu_freq(dev_priv, rps->efficient_freq));
1237         } else {
1238                 seq_puts(m, "no P-state info available\n");
1239         }
1240
1241         seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
1242         seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
1243         seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
1244
1245         intel_runtime_pm_put(dev_priv, wakeref);
1246         return ret;
1247 }
1248
1249 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1250                                struct seq_file *m,
1251                                struct intel_instdone *instdone)
1252 {
1253         int slice;
1254         int subslice;
1255
1256         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1257                    instdone->instdone);
1258
1259         if (INTEL_GEN(dev_priv) <= 3)
1260                 return;
1261
1262         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1263                    instdone->slice_common);
1264
1265         if (INTEL_GEN(dev_priv) <= 6)
1266                 return;
1267
1268         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1269                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1270                            slice, subslice, instdone->sampler[slice][subslice]);
1271
1272         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1273                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1274                            slice, subslice, instdone->row[slice][subslice]);
1275 }
1276
/*
 * Report the state of the hangcheck machinery: global reset flags,
 * whether the hangcheck timer/work is pending, and for each engine the
 * seqno/ACTHD recorded at the last hangcheck next to freshly sampled
 * values, so a stuck engine shows up as "current" not advancing.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/*
	 * Sample live engine state up-front under a runtime-pm wakeref;
	 * the registers behind these helpers need the device awake.
	 */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		/* instdone is only captured for the render engine (RCS0). */
		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		/* hangcheck's last observation vs. the sample taken above */
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS0) {
			/* fresh snapshot vs. hangcheck's accumulated view */
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1345
1346 static int i915_reset_info(struct seq_file *m, void *unused)
1347 {
1348         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1349         struct i915_gpu_error *error = &dev_priv->gpu_error;
1350         struct intel_engine_cs *engine;
1351         enum intel_engine_id id;
1352
1353         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1354
1355         for_each_engine(engine, dev_priv, id) {
1356                 seq_printf(m, "%s = %u\n", engine->name,
1357                            i915_reset_engine_count(error, engine));
1358         }
1359
1360         return 0;
1361 }
1362
1363 static int ironlake_drpc_info(struct seq_file *m)
1364 {
1365         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1366         u32 rgvmodectl, rstdbyctl;
1367         u16 crstandvid;
1368
1369         rgvmodectl = I915_READ(MEMMODECTL);
1370         rstdbyctl = I915_READ(RSTDBYCTL);
1371         crstandvid = I915_READ16(CRSTANDVID);
1372
1373         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1374         seq_printf(m, "Boost freq: %d\n",
1375                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1376                    MEMMODE_BOOST_FREQ_SHIFT);
1377         seq_printf(m, "HW control enabled: %s\n",
1378                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1379         seq_printf(m, "SW control enabled: %s\n",
1380                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1381         seq_printf(m, "Gated voltage change: %s\n",
1382                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1383         seq_printf(m, "Starting frequency: P%d\n",
1384                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1385         seq_printf(m, "Max P-state: P%d\n",
1386                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1387         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1388         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1389         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1390         seq_printf(m, "Render standby enabled: %s\n",
1391                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1392         seq_puts(m, "Current RS state: ");
1393         switch (rstdbyctl & RSX_STATUS_MASK) {
1394         case RSX_STATUS_ON:
1395                 seq_puts(m, "on\n");
1396                 break;
1397         case RSX_STATUS_RC1:
1398                 seq_puts(m, "RC1\n");
1399                 break;
1400         case RSX_STATUS_RC1E:
1401                 seq_puts(m, "RC1E\n");
1402                 break;
1403         case RSX_STATUS_RS1:
1404                 seq_puts(m, "RS1\n");
1405                 break;
1406         case RSX_STATUS_RS2:
1407                 seq_puts(m, "RS2 (RC6)\n");
1408                 break;
1409         case RSX_STATUS_RS3:
1410                 seq_puts(m, "RC3 (RC6+)\n");
1411                 break;
1412         default:
1413                 seq_puts(m, "unknown\n");
1414                 break;
1415         }
1416
1417         return 0;
1418 }
1419
1420 static int i915_forcewake_domains(struct seq_file *m, void *data)
1421 {
1422         struct drm_i915_private *i915 = node_to_i915(m->private);
1423         struct intel_uncore *uncore = &i915->uncore;
1424         struct intel_uncore_forcewake_domain *fw_domain;
1425         unsigned int tmp;
1426
1427         seq_printf(m, "user.bypass_count = %u\n",
1428                    uncore->user_forcewake.count);
1429
1430         for_each_fw_domain(fw_domain, uncore, tmp)
1431                 seq_printf(m, "%s.wake_count = %u\n",
1432                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1433                            READ_ONCE(fw_domain->wake_count));
1434
1435         return 0;
1436 }
1437
1438 static void print_rc6_res(struct seq_file *m,
1439                           const char *title,
1440                           const i915_reg_t reg)
1441 {
1442         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1443
1444         seq_printf(m, "%s %u (%llu us)\n",
1445                    title, I915_READ(reg),
1446                    intel_rc6_residency_us(dev_priv, reg));
1447 }
1448
/*
 * Valleyview/Cherryview flavour of the DRPC (render C-state) report:
 * RC6 enablement from GEN6_RC_CONTROL, render/media power-well state
 * from VLV_GTLC_PW_STATUS, and the RC6 residency counters.  Finishes
 * with the forcewake domain dump, whose return value is propagated.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* On VLV/CHV RC6 is on if either timeout mode or EI mode is set */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1470
/*
 * Gen6+ flavour of the DRPC (render C-state) report: which RC levels are
 * enabled in GEN6_RC_CONTROL, the current RC state decoded from
 * GEN6_GT_CORE_STATUS, gen9+ power-well gating, the RC6 residency
 * counters, and (gen6/7 only) the RC6 voltage IDs read from the PCU.
 * Finishes with the forcewake domain dump, whose return is propagated.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw MMIO read (no forcewake bookkeeping); trace it by hand */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs are only exposed through the PCU mailbox */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state field sampled above */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with cores power-gated still counts as a power-down */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* rc6vids packs one voltage ID per byte: RC6, RC6+, RC6++ */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1558
/*
 * debugfs entry point for the DRPC report.  Takes a runtime-pm wakeref
 * for the duration and dispatches to the platform-specific dumper.
 * Note the ordering: VLV/CHV must be tested before the generic gen >= 6
 * check, since they would match it too.  Returns -ENODEV only if no
 * branch runs (i.e. the wakeref could not be obtained).
 */
static int i915_drpc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	int err = -ENODEV;

	with_intel_runtime_pm(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			err = vlv_drpc_info(m);
		else if (INTEL_GEN(dev_priv) >= 6)
			err = gen6_drpc_info(m);
		else
			err = ironlake_drpc_info(m);
	}

	return err;
}
1576
1577 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1578 {
1579         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1580
1581         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1582                    dev_priv->fb_tracking.busy_bits);
1583
1584         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1585                    dev_priv->fb_tracking.flip_bits);
1586
1587         return 0;
1588 }
1589
1590 static int i915_fbc_status(struct seq_file *m, void *unused)
1591 {
1592         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1593         struct intel_fbc *fbc = &dev_priv->fbc;
1594         intel_wakeref_t wakeref;
1595
1596         if (!HAS_FBC(dev_priv))
1597                 return -ENODEV;
1598
1599         wakeref = intel_runtime_pm_get(dev_priv);
1600         mutex_lock(&fbc->lock);
1601
1602         if (intel_fbc_is_active(dev_priv))
1603                 seq_puts(m, "FBC enabled\n");
1604         else
1605                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1606
1607         if (intel_fbc_is_active(dev_priv)) {
1608                 u32 mask;
1609
1610                 if (INTEL_GEN(dev_priv) >= 8)
1611                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1612                 else if (INTEL_GEN(dev_priv) >= 7)
1613                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1614                 else if (INTEL_GEN(dev_priv) >= 5)
1615                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1616                 else if (IS_G4X(dev_priv))
1617                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1618                 else
1619                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1620                                                         FBC_STAT_COMPRESSED);
1621
1622                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1623         }
1624
1625         mutex_unlock(&fbc->lock);
1626         intel_runtime_pm_put(dev_priv, wakeref);
1627
1628         return 0;
1629 }
1630
1631 static int i915_fbc_false_color_get(void *data, u64 *val)
1632 {
1633         struct drm_i915_private *dev_priv = data;
1634
1635         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1636                 return -ENODEV;
1637
1638         *val = dev_priv->fbc.false_color;
1639
1640         return 0;
1641 }
1642
1643 static int i915_fbc_false_color_set(void *data, u64 val)
1644 {
1645         struct drm_i915_private *dev_priv = data;
1646         u32 reg;
1647
1648         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1649                 return -ENODEV;
1650
1651         mutex_lock(&dev_priv->fbc.lock);
1652
1653         reg = I915_READ(ILK_DPFC_CONTROL);
1654         dev_priv->fbc.false_color = val;
1655
1656         I915_WRITE(ILK_DPFC_CONTROL, val ?
1657                    (reg | FBC_CTL_FALSE_COLOR) :
1658                    (reg & ~FBC_CTL_FALSE_COLOR));
1659
1660         mutex_unlock(&dev_priv->fbc.lock);
1661         return 0;
1662 }
1663
/* debugfs file ops tying the false-color get/set helpers together;
 * the value is formatted as unsigned decimal. */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1667
1668 static int i915_ips_status(struct seq_file *m, void *unused)
1669 {
1670         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1671         intel_wakeref_t wakeref;
1672
1673         if (!HAS_IPS(dev_priv))
1674                 return -ENODEV;
1675
1676         wakeref = intel_runtime_pm_get(dev_priv);
1677
1678         seq_printf(m, "Enabled by kernel parameter: %s\n",
1679                    yesno(i915_modparams.enable_ips));
1680
1681         if (INTEL_GEN(dev_priv) >= 8) {
1682                 seq_puts(m, "Currently: unknown\n");
1683         } else {
1684                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1685                         seq_puts(m, "Currently: enabled\n");
1686                 else
1687                         seq_puts(m, "Currently: disabled\n");
1688         }
1689
1690         intel_runtime_pm_put(dev_priv, wakeref);
1691
1692         return 0;
1693 }
1694
/*
 * Report whether panel self-refresh is enabled, probing the platform's
 * specific enable bit.  Gen9+ has no single global SR bit (see comment
 * below), so those platforms always report "disabled".  Display power is
 * held across the register reads.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Each branch reads the SR enable bit from its platform's register */
	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1723
1724 static int i915_emon_status(struct seq_file *m, void *unused)
1725 {
1726         struct drm_i915_private *i915 = node_to_i915(m->private);
1727         intel_wakeref_t wakeref;
1728
1729         if (!IS_GEN(i915, 5))
1730                 return -ENODEV;
1731
1732         with_intel_runtime_pm(i915, wakeref) {
1733                 unsigned long temp, chipset, gfx;
1734
1735                 temp = i915_mch_val(i915);
1736                 chipset = i915_chipset_val(i915);
1737                 gfx = i915_gfx_val(i915);
1738
1739                 seq_printf(m, "GMCH temp: %ld\n", temp);
1740                 seq_printf(m, "Chipset power: %ld\n", chipset);
1741                 seq_printf(m, "GFX power: %ld\n", gfx);
1742                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1743         }
1744
1745         return 0;
1746 }
1747
/*
 * Dump the GPU-to-CPU/ring frequency mapping table from the PCU, one
 * row per GPU frequency between the RPS min and max.  LLC-only feature;
 * other platforms get -ENODEV.  Holds a runtime-pm wakeref and takes
 * pcu_lock interruptibly (propagating -EINTR/-ERESTARTSYS).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is in/out: the PCU overwrites it with the packed
		 * CPU (low byte) and ring (next byte) frequencies */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1796
1797 static int i915_opregion(struct seq_file *m, void *unused)
1798 {
1799         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1800         struct drm_device *dev = &dev_priv->drm;
1801         struct intel_opregion *opregion = &dev_priv->opregion;
1802         int ret;
1803
1804         ret = mutex_lock_interruptible(&dev->struct_mutex);
1805         if (ret)
1806                 goto out;
1807
1808         if (opregion->header)
1809                 seq_write(m, opregion->header, OPREGION_SIZE);
1810
1811         mutex_unlock(&dev->struct_mutex);
1812
1813 out:
1814         return 0;
1815 }
1816
1817 static int i915_vbt(struct seq_file *m, void *unused)
1818 {
1819         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1820
1821         if (opregion->vbt)
1822                 seq_write(m, opregion->vbt, opregion->vbt_size);
1823
1824         return 0;
1825 }
1826
1827 static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1828 {
1829         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1830         struct drm_device *dev = &dev_priv->drm;
1831         struct intel_framebuffer *fbdev_fb = NULL;
1832         struct drm_framebuffer *drm_fb;
1833         int ret;
1834
1835         ret = mutex_lock_interruptible(&dev->struct_mutex);
1836         if (ret)
1837                 return ret;
1838
1839 #ifdef CONFIG_DRM_FBDEV_EMULATION
1840         if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
1841                 fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
1842
1843                 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1844                            fbdev_fb->base.width,
1845                            fbdev_fb->base.height,
1846                            fbdev_fb->base.format->depth,
1847                            fbdev_fb->base.format->cpp[0] * 8,
1848                            fbdev_fb->base.modifier,
1849                            drm_framebuffer_read_refcount(&fbdev_fb->base));
1850                 describe_obj(m, intel_fb_obj(&fbdev_fb->base));
1851                 seq_putc(m, '\n');
1852         }
1853 #endif
1854
1855         mutex_lock(&dev->mode_config.fb_lock);
1856         drm_for_each_fb(drm_fb, dev) {
1857                 struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
1858                 if (fb == fbdev_fb)
1859                         continue;
1860
1861                 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1862                            fb->base.width,
1863                            fb->base.height,
1864                            fb->base.format->depth,
1865                            fb->base.format->cpp[0] * 8,
1866                            fb->base.modifier,
1867                            drm_framebuffer_read_refcount(&fb->base));
1868                 describe_obj(m, intel_fb_obj(&fb->base));
1869                 seq_putc(m, '\n');
1870         }
1871         mutex_unlock(&dev->mode_config.fb_lock);
1872         mutex_unlock(&dev->struct_mutex);
1873
1874         return 0;
1875 }
1876
1877 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1878 {
1879         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1880                    ring->space, ring->head, ring->tail, ring->emit);
1881 }
1882
/*
 * List every GEM context: its HW id and pin count, the owning process
 * (or "(deleted)"/"(kernel)"), the remap-slice flag, and a per-engine
 * summary of its state object and ringbuffer.  Takes struct_mutex
 * interruptibly to walk the context list.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* Only contexts on the hw_id list have a valid hw_id */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			/* task may already have exited; print only if alive */
			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = remap slice needed, 'r' = not */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1935
1936 static const char *swizzle_string(unsigned swizzle)
1937 {
1938         switch (swizzle) {
1939         case I915_BIT_6_SWIZZLE_NONE:
1940                 return "none";
1941         case I915_BIT_6_SWIZZLE_9:
1942                 return "bit9";
1943         case I915_BIT_6_SWIZZLE_9_10:
1944                 return "bit9/bit10";
1945         case I915_BIT_6_SWIZZLE_9_11:
1946                 return "bit9/bit11";
1947         case I915_BIT_6_SWIZZLE_9_10_11:
1948                 return "bit9/bit10/bit11";
1949         case I915_BIT_6_SWIZZLE_9_17:
1950                 return "bit9/bit17";
1951         case I915_BIT_6_SWIZZLE_9_10_17:
1952                 return "bit9/bit10/bit17";
1953         case I915_BIT_6_SWIZZLE_UNKNOWN:
1954                 return "unknown";
1955         }
1956
1957         return "bug";
1958 }
1959
/*
 * Report the detected bit-6 swizzle modes for X and Y tiling, plus the
 * raw memory-configuration registers they were derived from (different
 * register sets for gen3/4 vs gen6+).  Holds a runtime-pm wakeref for
 * the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		/* NOTE(review): label says "DDC" but the register is DCC --
		 * looks like a long-standing label typo; confirm before
		 * changing, as tools may parse this output */
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2007
2008 static const char *rps_power_to_str(unsigned int power)
2009 {
2010         static const char * const strings[] = {
2011                 [LOW_POWER] = "low power",
2012                 [BETWEEN] = "mixed",
2013                 [HIGH_POWER] = "high power",
2014         };
2015
2016         if (power >= ARRAY_SIZE(strings) || !strings[power])
2017                 return "unknown";
2018
2019         return strings[power];
2020 }
2021
/*
 * Report RPS (render power/frequency scaling) state: requested vs
 * actual frequency, the soft/hard limits, boost statistics, and -- when
 * the GPU is busy -- the up/down autotuning counters sampled under
 * forcewake.  The actual frequency is only read from hardware if a
 * runtime-pm reference is already held (_if_in_use); otherwise the
 * cached cur_freq is reported.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report the frequency via the punit */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw (_FW) reads, bracketed by explicit forcewake */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2090
2091 static int i915_llc(struct seq_file *m, void *data)
2092 {
2093         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2094         const bool edram = INTEL_GEN(dev_priv) > 8;
2095
2096         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2097         seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
2098                    dev_priv->edram_size_mb);
2099
2100         return 0;
2101 }
2102
/*
 * Dump HuC firmware information and the hardware HUC_STATUS2 register.
 * -ENODEV on parts without a HuC.  The register read needs the device
 * awake, so it is wrapped in a runtime-pm section.
 */
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_HUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref)
		seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));

	return 0;
}
2120
/*
 * Dump GuC firmware information, the decoded GUC_STATUS register
 * (bootrom / uKernel / MIA core fields) and the 16 GuC scratch
 * registers.  -ENODEV on parts without a GuC.  Register reads are
 * wrapped in a runtime-pm section.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2153
2154 static const char *
2155 stringify_guc_log_type(enum guc_log_buffer_type type)
2156 {
2157         switch (type) {
2158         case GUC_ISR_LOG_BUFFER:
2159                 return "ISR";
2160         case GUC_DPC_LOG_BUFFER:
2161                 return "DPC";
2162         case GUC_CRASH_DUMP_LOG_BUFFER:
2163                 return "CRASH";
2164         default:
2165                 MISSING_CASE(type);
2166         }
2167
2168         return "";
2169 }
2170
/*
 * Print GuC log-relay statistics: the relay full count and, for each log
 * buffer type, flush and sampled-overflow counters.  Prints a one-line
 * notice instead when the log relay is not enabled.
 */
static void i915_guc_log_info(struct seq_file *m,
			      struct drm_i915_private *dev_priv)
{
	struct intel_guc_log *log = &dev_priv->guc.log;
	enum guc_log_buffer_type type;

	if (!intel_guc_log_relay_enabled(log)) {
		seq_puts(m, "GuC log relay disabled\n");
		return;
	}

	seq_puts(m, "GuC logging stats:\n");

	seq_printf(m, "\tRelay full count: %u\n",
		   log->relay.full_count);

	for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
		seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
			   stringify_guc_log_type(type),
			   log->stats[type].flush,
			   log->stats[type].sampled_overflow);
	}
}
2194
/*
 * Print one GuC client's submission parameters (priority, stage index,
 * process-descriptor offset, doorbell) and its per-engine submission
 * counts plus their total.
 */
static void i915_guc_client_info(struct seq_file *m,
				 struct drm_i915_private *dev_priv,
				 struct intel_guc_client *client)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u64 tot = 0;

	seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
		client->priority, client->stage_id, client->proc_desc_offset);
	seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
		client->doorbell_id, client->doorbell_offset);

	for_each_engine(engine, dev_priv, id) {
		u64 submissions = client->submissions[id];
		tot += submissions;
		seq_printf(m, "\tSubmissions: %llu %s\n",
				submissions, engine->name);
	}
	seq_printf(m, "\tTotal: %llu\n", tot);
}
2216
2217 static int i915_guc_info(struct seq_file *m, void *data)
2218 {
2219         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2220         const struct intel_guc *guc = &dev_priv->guc;
2221
2222         if (!USES_GUC(dev_priv))
2223                 return -ENODEV;
2224
2225         i915_guc_log_info(m, dev_priv);
2226
2227         if (!USES_GUC_SUBMISSION(dev_priv))
2228                 return 0;
2229
2230         GEM_BUG_ON(!guc->execbuf_client);
2231
2232         seq_printf(m, "\nDoorbell map:\n");
2233         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2234         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2235
2236         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2237         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2238         if (guc->preempt_client) {
2239                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2240                            guc->preempt_client);
2241                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2242         }
2243
2244         /* Add more as required ... */
2245
2246         return 0;
2247 }
2248
/*
 * debugfs: walk the GuC stage descriptor pool and dump every ACTIVE
 * descriptor, including the per-engine execlist context state for the
 * engines used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	intel_engine_mask_t tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Only descriptors currently in use are worth printing. */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Dump the LRC state for each engine this client can use. */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2302
/*
 * debugfs: hexdump a GuC log buffer, four dwords per line.
 *
 * The same show function serves two debugfs entries: when info_ent->data is
 * non-NULL the load-error log is dumped, otherwise the regular runtime log.
 */
static int i915_guc_log_dump(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_i915_private *dev_priv = node_to_i915(node);
	bool dump_load_err = !!node->info_ent->data;
	struct drm_i915_gem_object *obj = NULL;
	u32 *log;
	int i = 0;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	if (dump_load_err)
		obj = dev_priv->guc.load_err_log;
	else if (dev_priv->guc.log.vma)
		obj = dev_priv->guc.log.vma->obj;

	/* No log buffer allocated: nothing to dump, not an error. */
	if (!obj)
		return 0;

	/* Map write-combined; we only read, but WC avoids cache flushing. */
	log = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(log)) {
		DRM_DEBUG("Failed to pin object\n");
		seq_puts(m, "(log data unaccessible)\n");
		return PTR_ERR(log);
	}

	/* Object sizes are page-aligned, so 4-dword strides never overrun. */
	for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
		seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
			   *(log + i), *(log + i + 1),
			   *(log + i + 2), *(log + i + 3));

	seq_putc(m, '\n');

	i915_gem_object_unpin_map(obj);

	return 0;
}
2341
2342 static int i915_guc_log_level_get(void *data, u64 *val)
2343 {
2344         struct drm_i915_private *dev_priv = data;
2345
2346         if (!USES_GUC(dev_priv))
2347                 return -ENODEV;
2348
2349         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2350
2351         return 0;
2352 }
2353
/* debugfs attribute setter: change the GuC log verbosity level. */
static int i915_guc_log_level_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;

	if (!USES_GUC(dev_priv))
		return -ENODEV;

	/* Validation of 'val' is delegated to the GuC log layer. */
	return intel_guc_log_set_level(&dev_priv->guc.log, val);
}

/* Simple read/write u64 attribute backed by the get/set helpers above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2367
2368 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2369 {
2370         struct drm_i915_private *dev_priv = inode->i_private;
2371
2372         if (!USES_GUC(dev_priv))
2373                 return -ENODEV;
2374
2375         file->private_data = &dev_priv->guc.log;
2376
2377         return intel_guc_log_relay_open(&dev_priv->guc.log);
2378 }
2379
2380 static ssize_t
2381 i915_guc_log_relay_write(struct file *filp,
2382                          const char __user *ubuf,
2383                          size_t cnt,
2384                          loff_t *ppos)
2385 {
2386         struct intel_guc_log *log = filp->private_data;
2387
2388         intel_guc_log_relay_flush(log);
2389
2390         return cnt;
2391 }
2392
/* release() hook: tear down the relay set up by i915_guc_log_relay_open(). */
static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
{
	struct drm_i915_private *dev_priv = inode->i_private;

	intel_guc_log_relay_close(&dev_priv->guc.log);

	return 0;
}

/* File operations for the GuC log relay debugfs entry (no read support). */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2408
2409 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2410 {
2411         u8 val;
2412         static const char * const sink_status[] = {
2413                 "inactive",
2414                 "transition to active, capture and display",
2415                 "active, display from RFB",
2416                 "active, capture and display on sink device timings",
2417                 "transition to inactive, capture and display, timing re-sync",
2418                 "reserved",
2419                 "reserved",
2420                 "sink internal error",
2421         };
2422         struct drm_connector *connector = m->private;
2423         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2424         struct intel_dp *intel_dp =
2425                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2426         int ret;
2427
2428         if (!CAN_PSR(dev_priv)) {
2429                 seq_puts(m, "PSR Unsupported\n");
2430                 return -ENODEV;
2431         }
2432
2433         if (connector->status != connector_status_connected)
2434                 return -ENODEV;
2435
2436         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2437
2438         if (ret == 1) {
2439                 const char *str = "unknown";
2440
2441                 val &= DP_PSR_SINK_STATE_MASK;
2442                 if (val < ARRAY_SIZE(sink_status))
2443                         str = sink_status[val];
2444                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2445         } else {
2446                 return ret;
2447         }
2448
2449         return 0;
2450 }
2451 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2452
/*
 * Decode and print the source-side PSR hardware status.
 *
 * Reads the live-status field out of EDP_PSR2_STATUS or EDP_PSR_STATUS,
 * depending on which PSR version is active; the two registers encode
 * different state machines, hence the two decode tables.
 */
static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{
	u32 val, status_val;
	const char *status = "unknown";

	if (dev_priv->psr.psr2_enabled) {
		/* PSR2 live-state names, indexed by the STATUS_STATE field. */
		static const char * const live_status[] = {
			"IDLE",
			"CAPTURE",
			"CAPTURE_FS",
			"SLEEP",
			"BUFON_FW",
			"ML_UP",
			"SU_STANDBY",
			"FAST_SLEEP",
			"DEEP_SLEEP",
			"BUF_ON",
			"TG_ON"
		};
		val = I915_READ(EDP_PSR2_STATUS);
		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
			      EDP_PSR2_STATUS_STATE_SHIFT;
		/* Out-of-range values fall back to "unknown". */
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	} else {
		/* PSR1 live-state names. */
		static const char * const live_status[] = {
			"IDLE",
			"SRDONACK",
			"SRDENT",
			"BUFOFF",
			"BUFON",
			"AUXACK",
			"SRDOFFACK",
			"SRDENT_ON",
		};
		val = I915_READ(EDP_PSR_STATUS);
		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
			      EDP_PSR_STATUS_STATE_SHIFT;
		if (status_val < ARRAY_SIZE(live_status))
			status = live_status[status_val];
	}

	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
}
2498
/*
 * debugfs: report the full eDP PSR state — sink support, active mode
 * (PSR1/PSR2), source hardware status, busy frontbuffer bits, and for
 * PSR2 the selective-update block counts per frame.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct i915_psr *psr = &dev_priv->psr;
	intel_wakeref_t wakeref;
	const char *status;
	bool enabled;
	u32 val;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
	if (psr->dp)
		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
	seq_puts(m, "\n");

	if (!psr->sink_support)
		return 0;

	/*
	 * Register reads below need the device awake; the PSR lock keeps
	 * the software state stable while we dump it.
	 */
	wakeref = intel_runtime_pm_get(dev_priv);
	mutex_lock(&psr->lock);

	if (psr->enabled)
		status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
	else
		status = "disabled";
	seq_printf(m, "PSR mode: %s\n", status);

	if (!psr->enabled)
		goto unlock;

	/* Read the enable bit from whichever control register is in use. */
	if (psr->psr2_enabled) {
		val = I915_READ(EDP_PSR2_CTL);
		enabled = val & EDP_PSR2_ENABLE;
	} else {
		val = I915_READ(EDP_PSR_CTL);
		enabled = val & EDP_PSR_ENABLE;
	}
	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
		   enableddisabled(enabled), val);
	psr_source_status(dev_priv, m);
	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
		   psr->busy_frontbuffer_bits);

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
		seq_printf(m, "Performance counter: %u\n", val);
	}

	/* IRQ timestamps are only tracked when IRQ debugging is enabled. */
	if (psr->debug & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   psr->last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
	}

	if (psr->psr2_enabled) {
		u32 su_frames_val[3];
		int frame;

		/*
		 * Reading all 3 registers before hand to minimize crossing a
		 * frame boundary between register reads
		 */
		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
			su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

		seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
			u32 su_blocks;

			/* Each register packs SU counts for three frames. */
			su_blocks = su_frames_val[frame / 3] &
				    PSR2_SU_STATUS_MASK(frame);
			su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
			seq_printf(m, "%d\t%d\n", frame, su_blocks);
		}
	}

unlock:
	mutex_unlock(&psr->lock);
	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2587
2588 static int
2589 i915_edp_psr_debug_set(void *data, u64 val)
2590 {
2591         struct drm_i915_private *dev_priv = data;
2592         intel_wakeref_t wakeref;
2593         int ret;
2594
2595         if (!CAN_PSR(dev_priv))
2596                 return -ENODEV;
2597
2598         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2599
2600         wakeref = intel_runtime_pm_get(dev_priv);
2601
2602         ret = intel_psr_debug_set(dev_priv, val);
2603
2604         intel_runtime_pm_put(dev_priv, wakeref);
2605
2606         return ret;
2607 }
2608
/* debugfs attribute getter: report the current PSR debug flags. */
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	/* READ_ONCE: debug flags may be updated concurrently by the setter. */
	*val = READ_ONCE(dev_priv->psr.debug);
	return 0;
}

/* Simple read/write u64 attribute backed by the get/set helpers above. */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2624
/*
 * debugfs: report accumulated GPU energy consumption in microjoules.
 *
 * The RAPL energy-status-units exponent is read from MSR_RAPL_POWER_UNIT
 * and the raw energy counter from MCH_SECP_NRG_STTS; the counter counts
 * in units of 1/2^ESU joules.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	intel_wakeref_t wakeref;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
		return -ENODEV;

	/* Energy Status Units exponent lives in bits 12:8 of the MSR. */
	units = (power & 0x1f00) >> 8;
	with_intel_runtime_pm(dev_priv, wakeref)
		power = I915_READ(MCH_SECP_NRG_STTS);

	/* raw * 1e6 / 2^units: convert counter units to microjoules. */
	power = (1000000 * power) >> units; /* convert to uJ */
	seq_printf(m, "%llu", power);

	return 0;
}
2647
2648 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2649 {
2650         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2651         struct pci_dev *pdev = dev_priv->drm.pdev;
2652
2653         if (!HAS_RUNTIME_PM(dev_priv))
2654                 seq_puts(m, "Runtime power management not supported\n");
2655
2656         seq_printf(m, "Runtime power status: %s\n",
2657                    enableddisabled(!dev_priv->power_domains.wakeref));
2658
2659         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2660         seq_printf(m, "IRQs disabled: %s\n",
2661                    yesno(!intel_irqs_enabled(dev_priv)));
2662 #ifdef CONFIG_PM
2663         seq_printf(m, "Usage count: %d\n",
2664                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2665 #else
2666         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2667 #endif
2668         seq_printf(m, "PCI device power state: %s [%d]\n",
2669                    pci_power_name(pdev->current_state),
2670                    pdev->current_state);
2671
2672         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2673                 struct drm_printer p = drm_seq_file_printer(m);
2674
2675                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2676         }
2677
2678         return 0;
2679 }
2680
2681 static int i915_power_domain_info(struct seq_file *m, void *unused)
2682 {
2683         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2684         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2685         int i;
2686
2687         mutex_lock(&power_domains->lock);
2688
2689         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2690         for (i = 0; i < power_domains->power_well_count; i++) {
2691                 struct i915_power_well *power_well;
2692                 enum intel_display_power_domain power_domain;
2693
2694                 power_well = &power_domains->power_wells[i];
2695                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2696                            power_well->count);
2697
2698                 for_each_power_domain(power_domain, power_well->desc->domains)
2699                         seq_printf(m, "  %-23s %d\n",
2700                                  intel_display_power_domain_str(power_domain),
2701                                  power_domains->domain_use_count[power_domain]);
2702         }
2703
2704         mutex_unlock(&power_domains->lock);
2705
2706         return 0;
2707 }
2708
/*
 * debugfs: report CSR/DMC firmware state — load status, version and the
 * DC-state transition counters maintained by the firmware.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers below apply. */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	/* DC counter registers below are only defined up to gen11. */
	if (WARN_ON(INTEL_GEN(dev_priv) > 11))
		goto out;

	seq_printf(m, "DC3 -> DC5 count: %d\n",
		   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
						    SKL_CSR_DC3_DC5_COUNT));
	if (!IS_GEN9_LP(dev_priv))
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2750
2751 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2752                                  struct drm_display_mode *mode)
2753 {
2754         int i;
2755
2756         for (i = 0; i < tabs; i++)
2757                 seq_putc(m, '\t');
2758
2759         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2760 }
2761
2762 static void intel_encoder_info(struct seq_file *m,
2763                                struct intel_crtc *intel_crtc,
2764                                struct intel_encoder *intel_encoder)
2765 {
2766         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2767         struct drm_device *dev = &dev_priv->drm;
2768         struct drm_crtc *crtc = &intel_crtc->base;
2769         struct intel_connector *intel_connector;
2770         struct drm_encoder *encoder;
2771
2772         encoder = &intel_encoder->base;
2773         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2774                    encoder->base.id, encoder->name);
2775         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2776                 struct drm_connector *connector = &intel_connector->base;
2777                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2778                            connector->base.id,
2779                            connector->name,
2780                            drm_get_connector_status_name(connector->status));
2781                 if (connector->status == connector_status_connected) {
2782                         struct drm_display_mode *mode = &crtc->mode;
2783                         seq_printf(m, ", mode:\n");
2784                         intel_seq_print_mode(m, 2, mode);
2785                 } else {
2786                         seq_putc(m, '\n');
2787                 }
2788         }
2789 }
2790
2791 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2792 {
2793         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2794         struct drm_device *dev = &dev_priv->drm;
2795         struct drm_crtc *crtc = &intel_crtc->base;
2796         struct intel_encoder *intel_encoder;
2797         struct drm_plane_state *plane_state = crtc->primary->state;
2798         struct drm_framebuffer *fb = plane_state->fb;
2799
2800         if (fb)
2801                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2802                            fb->base.id, plane_state->src_x >> 16,
2803                            plane_state->src_y >> 16, fb->width, fb->height);
2804         else
2805                 seq_puts(m, "\tprimary plane disabled\n");
2806         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2807                 intel_encoder_info(m, intel_crtc, intel_encoder);
2808 }
2809
2810 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2811 {
2812         struct drm_display_mode *mode = panel->fixed_mode;
2813
2814         seq_printf(m, "\tfixed mode:\n");
2815         intel_seq_print_mode(m, 2, mode);
2816 }
2817
/*
 * Connector-info helper for DP/eDP connectors: DPCD revision, audio
 * support, the fixed panel mode for eDP, and downstream port details.
 */
static void intel_dp_info(struct seq_file *m,
			  struct intel_connector *intel_connector)
{
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
	seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
	/* Only eDP connectors carry an internal panel with a fixed mode. */
	if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		intel_panel_info(m, &intel_connector->panel);

	drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
				&intel_dp->aux);
}
2832
2833 static void intel_dp_mst_info(struct seq_file *m,
2834                           struct intel_connector *intel_connector)
2835 {
2836         struct intel_encoder *intel_encoder = intel_connector->encoder;
2837         struct intel_dp_mst_encoder *intel_mst =
2838                 enc_to_mst(&intel_encoder->base);
2839         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2840         struct intel_dp *intel_dp = &intel_dig_port->dp;
2841         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2842                                         intel_connector->port);
2843
2844         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2845 }
2846
2847 static void intel_hdmi_info(struct seq_file *m,
2848                             struct intel_connector *intel_connector)
2849 {
2850         struct intel_encoder *intel_encoder = intel_connector->encoder;
2851         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2852
2853         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2854 }
2855
2856 static void intel_lvds_info(struct seq_file *m,
2857                             struct intel_connector *intel_connector)
2858 {
2859         intel_panel_info(m, &intel_connector->panel);
2860 }
2861
/*
 * Print everything known about one connector: identity, physical
 * dimensions, subpixel order, type-specific details (DP/HDMI/LVDS) and
 * the full mode list.
 */
static void intel_connector_info(struct seq_file *m,
				 struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_connector->encoder;
	struct drm_display_mode *mode;

	seq_printf(m, "connector %d: type %s, status: %s\n",
		   connector->base.id, connector->name,
		   drm_get_connector_status_name(connector->status));

	/* Nothing further to report for a disconnected connector. */
	if (connector->status == connector_status_disconnected)
		return;

	seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
		   connector->display_info.width_mm,
		   connector->display_info.height_mm);
	seq_printf(m, "\tsubpixel order: %s\n",
		   drm_get_subpixel_order_name(connector->display_info.subpixel_order));
	seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);

	/* Type-specific helpers below dereference the encoder. */
	if (!intel_encoder)
		return;

	switch (connector->connector_type) {
	case DRM_MODE_CONNECTOR_DisplayPort:
	case DRM_MODE_CONNECTOR_eDP:
		if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
			intel_dp_mst_info(m, intel_connector);
		else
			intel_dp_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_LVDS:
		if (intel_encoder->type == INTEL_OUTPUT_LVDS)
			intel_lvds_info(m, intel_connector);
		break;
	case DRM_MODE_CONNECTOR_HDMIA:
		if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
		    intel_encoder->type == INTEL_OUTPUT_DDI)
			intel_hdmi_info(m, intel_connector);
		break;
	default:
		break;
	}

	seq_printf(m, "\tmodes:\n");
	list_for_each_entry(mode, &connector->modes, head)
		intel_seq_print_mode(m, 2, mode);
}
2911
/* Map a drm_plane_type to a short three-letter label for debugfs output. */
static const char *plane_type(enum drm_plane_type type)
{
	switch (type) {
	case DRM_PLANE_TYPE_OVERLAY:
		return "OVL";
	case DRM_PLANE_TYPE_PRIMARY:
		return "PRI";
	case DRM_PLANE_TYPE_CURSOR:
		return "CUR";
	/*
	 * Deliberately omitting default: to generate compiler warnings
	 * when a new drm_plane_type gets added.
	 */
	}

	return "unknown";
}
2929
/*
 * Format a plane rotation bitmask into a human-readable string in 'buf'
 * (at most 'bufsize' bytes including the NUL terminator).
 */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, bufsize,
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);
}
2946
/*
 * Print one line of state for every plane attached to a CRTC: type,
 * position and size on the CRTC, the source rectangle (16.16 fixed
 * point, printed as whole.fraction), pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_plane *intel_plane;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct drm_plane_state *state;
		struct drm_plane *plane = &intel_plane->base;
		struct drm_format_name_buf format_name;
		char rot_str[48];

		if (!plane->state) {
			seq_puts(m, "plane->state is NULL!\n");
			continue;
		}

		state = plane->state;

		if (state->fb) {
			drm_get_format_name(state->fb->format->format,
					    &format_name);
		} else {
			sprintf(format_name.str, "N/A");
		}

		plane_rotation(rot_str, sizeof(rot_str), state->rotation);

		/*
		 * src_* are 16.16 fixed point. The fraction is scaled to
		 * four decimal digits: (frac * 15625) >> 10 == frac *
		 * 10000 / 65536.
		 */
		seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
			   plane->base.id,
			   plane_type(intel_plane->base.type),
			   state->crtc_x, state->crtc_y,
			   state->crtc_w, state->crtc_h,
			   (state->src_x >> 16),
			   ((state->src_x & 0xffff) * 15625) >> 10,
			   (state->src_y >> 16),
			   ((state->src_y & 0xffff) * 15625) >> 10,
			   (state->src_w >> 16),
			   ((state->src_w & 0xffff) * 15625) >> 10,
			   (state->src_h >> 16),
			   ((state->src_h & 0xffff) * 15625) >> 10,
			   format_name.str,
			   rot_str);
	}
}
2992
/*
 * Print the scaler state of a CRTC: how many scalers exist, which are
 * claimed, and the in-use/mode status of each.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3020
3021 static int i915_display_info(struct seq_file *m, void *unused)
3022 {
3023         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3024         struct drm_device *dev = &dev_priv->drm;
3025         struct intel_crtc *crtc;
3026         struct drm_connector *connector;
3027         struct drm_connector_list_iter conn_iter;
3028         intel_wakeref_t wakeref;
3029
3030         wakeref = intel_runtime_pm_get(dev_priv);
3031
3032         seq_printf(m, "CRTC info\n");
3033         seq_printf(m, "---------\n");
3034         for_each_intel_crtc(dev, crtc) {
3035                 struct intel_crtc_state *pipe_config;
3036
3037                 drm_modeset_lock(&crtc->base.mutex, NULL);
3038                 pipe_config = to_intel_crtc_state(crtc->base.state);
3039
3040                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3041                            crtc->base.base.id, pipe_name(crtc->pipe),
3042                            yesno(pipe_config->base.active),
3043                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3044                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3045
3046                 if (pipe_config->base.active) {
3047                         struct intel_plane *cursor =
3048                                 to_intel_plane(crtc->base.cursor);
3049
3050                         intel_crtc_info(m, crtc);
3051
3052                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3053                                    yesno(cursor->base.state->visible),
3054                                    cursor->base.state->crtc_x,
3055                                    cursor->base.state->crtc_y,
3056                                    cursor->base.state->crtc_w,
3057                                    cursor->base.state->crtc_h,
3058                                    cursor->cursor.base);
3059                         intel_scaler_info(m, crtc);
3060                         intel_plane_info(m, crtc);
3061                 }
3062
3063                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3064                            yesno(!crtc->cpu_fifo_underrun_disabled),
3065                            yesno(!crtc->pch_fifo_underrun_disabled));
3066                 drm_modeset_unlock(&crtc->base.mutex);
3067         }
3068
3069         seq_printf(m, "\n");
3070         seq_printf(m, "Connector info\n");
3071         seq_printf(m, "--------------\n");
3072         mutex_lock(&dev->mode_config.mutex);
3073         drm_connector_list_iter_begin(dev, &conn_iter);
3074         drm_for_each_connector_iter(connector, &conn_iter)
3075                 intel_connector_info(m, connector);
3076         drm_connector_list_iter_end(&conn_iter);
3077         mutex_unlock(&dev->mode_config.mutex);
3078
3079         intel_runtime_pm_put(dev_priv, wakeref);
3080
3081         return 0;
3082 }
3083
3084 static int i915_engine_info(struct seq_file *m, void *unused)
3085 {
3086         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3087         struct intel_engine_cs *engine;
3088         intel_wakeref_t wakeref;
3089         enum intel_engine_id id;
3090         struct drm_printer p;
3091
3092         wakeref = intel_runtime_pm_get(dev_priv);
3093
3094         seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
3095         seq_printf(m, "Global active requests: %d\n",
3096                    dev_priv->gt.active_requests);
3097         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3098                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3099
3100         p = drm_seq_file_printer(m);
3101         for_each_engine(engine, dev_priv, id)
3102                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3103
3104         intel_runtime_pm_put(dev_priv, wakeref);
3105
3106         return 0;
3107 }
3108
3109 static int i915_rcs_topology(struct seq_file *m, void *unused)
3110 {
3111         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3112         struct drm_printer p = drm_seq_file_printer(m);
3113
3114         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3115
3116         return 0;
3117 }
3118
3119 static int i915_shrinker_info(struct seq_file *m, void *unused)
3120 {
3121         struct drm_i915_private *i915 = node_to_i915(m->private);
3122
3123         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3124         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3125
3126         return 0;
3127 }
3128
/*
 * Dump software state and the cached hardware state for every shared
 * DPLL.  All hw_state fields are printed unconditionally; registers that
 * don't exist on the current platform simply show whatever the state
 * struct holds (typically zero) — no per-platform filtering here.
 */
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        int i;

        /* Hold all modeset locks so pll->state can't change underneath us. */
        drm_modeset_lock_all(dev);
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
                           pll->info->id);
                seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
                           pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
                seq_printf(m, " tracked hardware state:\n");
                seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
                seq_printf(m, " dpll_md: 0x%08x\n",
                           pll->state.hw_state.dpll_md);
                seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
                seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
                seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
                seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
                seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
                seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
                           pll->state.hw_state.mg_refclkin_ctl);
                seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
                           pll->state.hw_state.mg_clktop2_coreclkctl1);
                seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
                           pll->state.hw_state.mg_clktop2_hsclkctl);
                seq_printf(m, " mg_pll_div0:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div0);
                seq_printf(m, " mg_pll_div1:  0x%08x\n",
                           pll->state.hw_state.mg_pll_div1);
                seq_printf(m, " mg_pll_lf:    0x%08x\n",
                           pll->state.hw_state.mg_pll_lf);
                seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
                           pll->state.hw_state.mg_pll_frac_lock);
                seq_printf(m, " mg_pll_ssc:   0x%08x\n",
                           pll->state.hw_state.mg_pll_ssc);
                seq_printf(m, " mg_pll_bias:  0x%08x\n",
                           pll->state.hw_state.mg_pll_bias);
                seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
                           pll->state.hw_state.mg_pll_tdc_coldst_bias);
        }
        drm_modeset_unlock_all(dev);

        return 0;
}
3177
3178 static int i915_wa_registers(struct seq_file *m, void *unused)
3179 {
3180         struct drm_i915_private *i915 = node_to_i915(m->private);
3181         const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
3182         struct i915_wa *wa;
3183         unsigned int i;
3184
3185         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3186         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3187                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3188                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3189
3190         return 0;
3191 }
3192
3193 static int i915_ipc_status_show(struct seq_file *m, void *data)
3194 {
3195         struct drm_i915_private *dev_priv = m->private;
3196
3197         seq_printf(m, "Isochronous Priority Control: %s\n",
3198                         yesno(dev_priv->ipc_enabled));
3199         return 0;
3200 }
3201
3202 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3203 {
3204         struct drm_i915_private *dev_priv = inode->i_private;
3205
3206         if (!HAS_IPC(dev_priv))
3207                 return -ENODEV;
3208
3209         return single_open(file, i915_ipc_status_show, dev_priv);
3210 }
3211
/*
 * Parse a boolean from userspace and enable/disable IPC accordingly.
 * Returns the number of bytes consumed, or a negative errno on bad input.
 */
static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
                                     size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        intel_wakeref_t wakeref;
        bool enable;
        int ret;

        ret = kstrtobool_from_user(ubuf, len, &enable);
        if (ret < 0)
                return ret;

        /* Apply the change with the device awake. */
        with_intel_runtime_pm(dev_priv, wakeref) {
                if (!dev_priv->ipc_enabled && enable)
                        DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
                /* Force watermark recomputation on the next commit. */
                dev_priv->wm.distrust_bios_wm = true;
                dev_priv->ipc_enabled = enable;
                intel_enable_ipc(dev_priv);
        }

        return len;
}
3235
/* debugfs fops for i915_ipc_status: readable via seq_file, writable to toggle IPC. */
static const struct file_operations i915_ipc_status_fops = {
        .owner = THIS_MODULE,
        .open = i915_ipc_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_ipc_status_write
};
3244
/*
 * Dump the DDB (Data Buffer) allocation per plane and cursor for every
 * pipe: start/end block and size.  Gen9+ only; the skl watermark state
 * is not populated on earlier platforms.
 */
static int i915_ddb_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct skl_ddb_entry *entry;
        struct intel_crtc *crtc;

        if (INTEL_GEN(dev_priv) < 9)
                return -ENODEV;

        /* Hold all modeset locks so the committed crtc state is stable. */
        drm_modeset_lock_all(dev);

        seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");

        for_each_intel_crtc(&dev_priv->drm, crtc) {
                struct intel_crtc_state *crtc_state =
                        to_intel_crtc_state(crtc->base.state);
                enum pipe pipe = crtc->pipe;
                enum plane_id plane_id;

                seq_printf(m, "Pipe %c\n", pipe_name(pipe));

                /* Iterates over all plane ids, including PLANE_CURSOR. */
                for_each_plane_id_on_crtc(crtc, plane_id) {
                        entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
                        seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
                                   entry->start, entry->end,
                                   skl_ddb_entry_size(entry));
                }

                /* Cursor printed again with its own label. */
                entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
                seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
                           entry->end, skl_ddb_entry_size(entry));
        }

        drm_modeset_unlock_all(dev);

        return 0;
}
3283
/*
 * Print DRRS (Dynamic Refresh Rate Switching) status for one CRTC:
 * attached connector name(s), VBT-declared DRRS type, and — when the
 * committed CRTC state has DRRS — the live state under drrs->mutex.
 * Every path inside the has_drrs branch releases the mutex before
 * returning.
 */
static void drrs_status_per_crtc(struct seq_file *m,
                                 struct drm_device *dev,
                                 struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_drrs *drrs = &dev_priv->drrs;
        int vrefresh = 0;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        /* Name every connector currently driven by this CRTC. */
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->state->crtc != &intel_crtc->base)
                        continue;

                seq_printf(m, "%s:\n", connector->name);
        }
        drm_connector_list_iter_end(&conn_iter);

        if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Static");
        else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
                seq_puts(m, "\tVBT: DRRS_type: Seamless");
        else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
                seq_puts(m, "\tVBT: DRRS_type: None");
        else
                seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

        seq_puts(m, "\n\n");

        if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
                struct intel_panel *panel;

                mutex_lock(&drrs->mutex);
                /* DRRS Supported */
                seq_puts(m, "\tDRRS Supported: Yes\n");

                /* disable_drrs() will make drrs->dp NULL */
                if (!drrs->dp) {
                        seq_puts(m, "Idleness DRRS: Disabled\n");
                        if (dev_priv->psr.enabled)
                                seq_puts(m,
                                "\tAs PSR is enabled, DRRS is not enabled\n");
                        mutex_unlock(&drrs->mutex);
                        return;
                }

                panel = &drrs->dp->attached_connector->panel;
                seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
                                        drrs->busy_frontbuffer_bits);

                seq_puts(m, "\n\t\t");
                /* Report the active refresh-rate mode and its vrefresh. */
                if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
                        seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
                        vrefresh = panel->fixed_mode->vrefresh;
                } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
                        seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
                        vrefresh = panel->downclock_mode->vrefresh;
                } else {
                        seq_printf(m, "DRRS_State: Unknown(%d)\n",
                                                drrs->refresh_rate_type);
                        mutex_unlock(&drrs->mutex);
                        return;
                }
                seq_printf(m, "\t\tVrefresh: %d", vrefresh);

                seq_puts(m, "\n\t\t");
                mutex_unlock(&drrs->mutex);
        } else {
                /* DRRS not supported. Print the VBT parameter*/
                seq_puts(m, "\tDRRS Supported : No");
        }
        seq_puts(m, "\n");
}
3358
3359 static int i915_drrs_status(struct seq_file *m, void *unused)
3360 {
3361         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3362         struct drm_device *dev = &dev_priv->drm;
3363         struct intel_crtc *intel_crtc;
3364         int active_crtc_cnt = 0;
3365
3366         drm_modeset_lock_all(dev);
3367         for_each_intel_crtc(dev, intel_crtc) {
3368                 if (intel_crtc->base.state->active) {
3369                         active_crtc_cnt++;
3370                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3371
3372                         drrs_status_per_crtc(m, dev, intel_crtc);
3373                 }
3374         }
3375         drm_modeset_unlock_all(dev);
3376
3377         if (!active_crtc_cnt)
3378                 seq_puts(m, "No active crtc found\n");
3379
3380         return 0;
3381 }
3382
/*
 * Dump the DP MST topology for every MST-capable DP source port.
 * Connectors driven by an MST stream encoder are skipped; the topology
 * is dumped once from the owning digital port instead.
 */
static int i915_dp_mst_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_encoder *intel_encoder;
        struct intel_digital_port *intel_dig_port;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip connectors without an encoder or with an MST stream encoder. */
                intel_encoder = intel_attached_encoder(connector);
                if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                intel_dig_port = enc_to_dig_port(&intel_encoder->base);
                if (!intel_dig_port->dp.can_mst)
                        continue;

                seq_printf(m, "MST Source Port %c\n",
                           port_name(intel_dig_port->base.port));
                drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
3413
/*
 * Userspace write handler for DP compliance "test active".  Parses an
 * integer from the written buffer and sets compliance.test_active on
 * every connected non-MST DP connector.  Only the exact value 1 arms
 * the compliance code; anything else clears it.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
                                                  const char __user *ubuf,
                                                  size_t len, loff_t *offp)
{
        char *input_buffer;
        int status = 0;
        struct drm_device *dev;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;
        int val = 0;

        dev = ((struct seq_file *)file->private_data)->private;

        if (len == 0)
                return 0;

        /* Copy + NUL-terminate the user buffer in one go. */
        input_buffer = memdup_user_nul(ubuf, len);
        if (IS_ERR(input_buffer))
                return PTR_ERR(input_buffer);

        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip MST stream encoders; only the real DP port is armed. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        status = kstrtoint(input_buffer, 10, &val);
                        if (status < 0)
                                break;
                        DRM_DEBUG_DRIVER("Got %d for test active\n", val);
                        /* To prevent erroneous activation of the compliance
                         * testing code, only accept an actual value of 1 here
                         */
                        if (val == 1)
                                intel_dp->compliance.test_active = 1;
                        else
                                intel_dp->compliance.test_active = 0;
                }
        }
        drm_connector_list_iter_end(&conn_iter);
        kfree(input_buffer);
        if (status < 0)
                return status;

        *offp += len;
        return len;
}
3472
3473 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3474 {
3475         struct drm_i915_private *dev_priv = m->private;
3476         struct drm_device *dev = &dev_priv->drm;
3477         struct drm_connector *connector;
3478         struct drm_connector_list_iter conn_iter;
3479         struct intel_dp *intel_dp;
3480
3481         drm_connector_list_iter_begin(dev, &conn_iter);
3482         drm_for_each_connector_iter(connector, &conn_iter) {
3483                 struct intel_encoder *encoder;
3484
3485                 if (connector->connector_type !=
3486                     DRM_MODE_CONNECTOR_DisplayPort)
3487                         continue;
3488
3489                 encoder = to_intel_encoder(connector->encoder);
3490                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3491                         continue;
3492
3493                 if (encoder && connector->status == connector_status_connected) {
3494                         intel_dp = enc_to_intel_dp(&encoder->base);
3495                         if (intel_dp->compliance.test_active)
3496                                 seq_puts(m, "1");
3497                         else
3498                                 seq_puts(m, "0");
3499                 } else
3500                         seq_puts(m, "0");
3501         }
3502         drm_connector_list_iter_end(&conn_iter);
3503
3504         return 0;
3505 }
3506
3507 static int i915_displayport_test_active_open(struct inode *inode,
3508                                              struct file *file)
3509 {
3510         return single_open(file, i915_displayport_test_active_show,
3511                            inode->i_private);
3512 }
3513
/* debugfs fops for DP compliance test_active: seq_file read, writable to arm. */
static const struct file_operations i915_displayport_test_active_fops = {
        .owner = THIS_MODULE,
        .open = i915_displayport_test_active_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_displayport_test_active_write
};
3522
/*
 * Show DP compliance test data for each connected non-MST DP connector:
 * the EDID checksum (hex) for EDID-read tests, or hdisplay/vdisplay/bpc
 * for video-pattern tests.  "0" is printed for ports with no encoder or
 * not connected.
 */
static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip MST stream encoders; dump only the real DP ports. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (intel_dp->compliance.test_type ==
                            DP_TEST_LINK_EDID_READ)
                                seq_printf(m, "%lx",
                                           intel_dp->compliance.test_data.edid);
                        else if (intel_dp->compliance.test_type ==
                                 DP_TEST_LINK_VIDEO_PATTERN) {
                                seq_printf(m, "hdisplay: %d\n",
                                           intel_dp->compliance.test_data.hdisplay);
                                seq_printf(m, "vdisplay: %d\n",
                                           intel_dp->compliance.test_data.vdisplay);
                                seq_printf(m, "bpc: %u\n",
                                           intel_dp->compliance.test_data.bpc);
                        }
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3566
/*
 * Show the DP compliance test type (hex) for each connected non-MST DP
 * connector; "0" for ports with no encoder or not connected.
 */
static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        struct intel_dp *intel_dp;

        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
                struct intel_encoder *encoder;

                if (connector->connector_type !=
                    DRM_MODE_CONNECTOR_DisplayPort)
                        continue;

                /* Skip MST stream encoders; query only the real DP ports. */
                encoder = to_intel_encoder(connector->encoder);
                if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
                        continue;

                if (encoder && connector->status == connector_status_connected) {
                        intel_dp = enc_to_intel_dp(&encoder->base);
                        seq_printf(m, "%02lx", intel_dp->compliance.test_type);
                } else
                        seq_puts(m, "0");
        }
        drm_connector_list_iter_end(&conn_iter);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3598
/*
 * Print one watermark latency table, converting each entry to
 * microseconds.  The number of valid levels depends on the platform;
 * entries beyond num_levels are not shown.
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        int level;
        int num_levels;

        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        /* Lock out concurrent watermark writes while dumping the table. */
        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++) {
                unsigned int latency = wm[level];

                /*
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv (and g4x, per the
                 *   condition below)
                 */
                if (INTEL_GEN(dev_priv) >= 9 ||
                    IS_VALLEYVIEW(dev_priv) ||
                    IS_CHERRYVIEW(dev_priv) ||
                    IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;

                /* latency is in tenths of a microsecond at this point. */
                seq_printf(m, "WM%d %u (%u.%u usec)\n",
                           level, wm[level], latency / 10, latency % 10);
        }

        drm_modeset_unlock_all(dev);
}
3638
3639 static int pri_wm_latency_show(struct seq_file *m, void *data)
3640 {
3641         struct drm_i915_private *dev_priv = m->private;
3642         const u16 *latencies;
3643
3644         if (INTEL_GEN(dev_priv) >= 9)
3645                 latencies = dev_priv->wm.skl_latency;
3646         else
3647                 latencies = dev_priv->wm.pri_latency;
3648
3649         wm_latency_show(m, latencies);
3650
3651         return 0;
3652 }
3653
3654 static int spr_wm_latency_show(struct seq_file *m, void *data)
3655 {
3656         struct drm_i915_private *dev_priv = m->private;
3657         const u16 *latencies;
3658
3659         if (INTEL_GEN(dev_priv) >= 9)
3660                 latencies = dev_priv->wm.skl_latency;
3661         else
3662                 latencies = dev_priv->wm.spr_latency;
3663
3664         wm_latency_show(m, latencies);
3665
3666         return 0;
3667 }
3668
3669 static int cur_wm_latency_show(struct seq_file *m, void *data)
3670 {
3671         struct drm_i915_private *dev_priv = m->private;
3672         const u16 *latencies;
3673
3674         if (INTEL_GEN(dev_priv) >= 9)
3675                 latencies = dev_priv->wm.skl_latency;
3676         else
3677                 latencies = dev_priv->wm.cur_latency;
3678
3679         wm_latency_show(m, latencies);
3680
3681         return 0;
3682 }
3683
3684 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3685 {
3686         struct drm_i915_private *dev_priv = inode->i_private;
3687
3688         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3689                 return -ENODEV;
3690
3691         return single_open(file, pri_wm_latency_show, dev_priv);
3692 }
3693
3694 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3695 {
3696         struct drm_i915_private *dev_priv = inode->i_private;
3697
3698         if (HAS_GMCH(dev_priv))
3699                 return -ENODEV;
3700
3701         return single_open(file, spr_wm_latency_show, dev_priv);
3702 }
3703
3704 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3705 {
3706         struct drm_i915_private *dev_priv = inode->i_private;
3707
3708         if (HAS_GMCH(dev_priv))
3709                 return -ENODEV;
3710
3711         return single_open(file, cur_wm_latency_show, dev_priv);
3712 }
3713
/*
 * Common write handler for the WM latency files.  Parses up to eight
 * u16 values from userspace and stores them into @wm; the write must
 * supply exactly the platform's number of levels or -EINVAL is
 * returned.
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
                                size_t len, loff_t *offp, u16 wm[8])
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct drm_device *dev = &dev_priv->drm;
        u16 new[8] = { 0 };
        int num_levels;
        int level;
        int ret;
        char tmp[32];

        /* Same per-platform level count as wm_latency_show(). */
        if (IS_CHERRYVIEW(dev_priv))
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
        else if (IS_G4X(dev_priv))
                num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;

        /* Leave room for the terminating NUL below. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
                     &new[0], &new[1], &new[2], &new[3],
                     &new[4], &new[5], &new[6], &new[7]);
        /* Require exactly num_levels values — no more, no fewer. */
        if (ret != num_levels)
                return -EINVAL;

        /* Update the table under the modeset locks. */
        drm_modeset_lock_all(dev);

        for (level = 0; level < num_levels; level++)
                wm[level] = new[level];

        drm_modeset_unlock_all(dev);

        return len;
}
3758
3759
3760 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3761                                     size_t len, loff_t *offp)
3762 {
3763         struct seq_file *m = file->private_data;
3764         struct drm_i915_private *dev_priv = m->private;
3765         u16 *latencies;
3766
3767         if (INTEL_GEN(dev_priv) >= 9)
3768                 latencies = dev_priv->wm.skl_latency;
3769         else
3770                 latencies = dev_priv->wm.pri_latency;
3771
3772         return wm_latency_write(file, ubuf, len, offp, latencies);
3773 }
3774
3775 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3776                                     size_t len, loff_t *offp)
3777 {
3778         struct seq_file *m = file->private_data;
3779         struct drm_i915_private *dev_priv = m->private;
3780         u16 *latencies;
3781
3782         if (INTEL_GEN(dev_priv) >= 9)
3783                 latencies = dev_priv->wm.skl_latency;
3784         else
3785                 latencies = dev_priv->wm.spr_latency;
3786
3787         return wm_latency_write(file, ubuf, len, offp, latencies);
3788 }
3789
3790 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3791                                     size_t len, loff_t *offp)
3792 {
3793         struct seq_file *m = file->private_data;
3794         struct drm_i915_private *dev_priv = m->private;
3795         u16 *latencies;
3796
3797         if (INTEL_GEN(dev_priv) >= 9)
3798                 latencies = dev_priv->wm.skl_latency;
3799         else
3800                 latencies = dev_priv->wm.cur_latency;
3801
3802         return wm_latency_write(file, ubuf, len, offp, latencies);
3803 }
3804
/* debugfs fops: seq_file read of primary-plane WM latencies; write updates them. */
static const struct file_operations i915_pri_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = pri_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = pri_wm_latency_write
};
3813
/* debugfs fops: seq_file read of sprite-plane WM latencies; write updates them. */
static const struct file_operations i915_spr_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = spr_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = spr_wm_latency_write
};
3822
/* debugfs fops: seq_file read of cursor-plane WM latencies; write updates them. */
static const struct file_operations i915_cur_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = cur_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = cur_wm_latency_write
};
3831
3832 static int
3833 i915_wedged_get(void *data, u64 *val)
3834 {
3835         int ret = i915_terminally_wedged(data);
3836
3837         switch (ret) {
3838         case -EIO:
3839                 *val = 1;
3840                 return 0;
3841         case 0:
3842                 *val = 0;
3843                 return 0;
3844         default:
3845                 return ret;
3846         }
3847 }
3848
/*
 * Manually declare engines wedged from debugfs: writes a mask of engines to
 * mark as hung and kicks off error handling (with state capture).
 */
static int
i915_wedged_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        /* Flush any previous reset before applying for a new one */
        wait_event(i915->gpu_error.reset_queue,
                   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

        i915_handle_error(i915, val, I915_ERROR_CAPTURE,
                          "Manually set wedged engine mask = %llx", val);
        return 0;
}
3862
/* i915_wedged: read reports terminal-wedge state, write forces a reset. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
3866
/*
 * Flag bits accepted by the i915_gem_drop_caches debugfs interface;
 * DROP_ALL is the union of every supported operation.
 */
#define DROP_UNBOUND    BIT(0)
#define DROP_BOUND      BIT(1)
#define DROP_RETIRE     BIT(2)
#define DROP_ACTIVE     BIT(3)
#define DROP_FREED      BIT(4)
#define DROP_SHRINK_ALL BIT(5)
#define DROP_IDLE       BIT(6)
#define DROP_RESET_ACTIVE       BIT(7)
#define DROP_RESET_SEQNO        BIT(8)
#define DROP_ALL (DROP_UNBOUND  | \
                  DROP_BOUND    | \
                  DROP_RETIRE   | \
                  DROP_ACTIVE   | \
                  DROP_FREED    | \
                  DROP_SHRINK_ALL |\
                  DROP_IDLE     | \
                  DROP_RESET_ACTIVE | \
                  DROP_RESET_SEQNO)
3885 static int
3886 i915_drop_caches_get(void *data, u64 *val)
3887 {
3888         *val = DROP_ALL;
3889
3890         return 0;
3891 }
3892
/*
 * Drop GEM caches per the DROP_* mask written to debugfs. Operations are
 * applied in a deliberate order: wedge-if-stuck, wait/retire under
 * struct_mutex, un-wedge via reset, shrink under a reclaim context,
 * drain idle work, then free deferred objects.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);

        /*
         * If the engines refuse to idle in time, wedge the GPU so the
         * reset attempted further below starts from a known state.
         */
        if (val & DROP_RESET_ACTIVE &&
            wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
                i915_gem_set_wedged(i915);

        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
        if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
                int ret;

                ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
                if (ret)
                        return ret;

                /*
                 * NOTE(review): a failure here is not propagated to the
                 * caller; the remaining drops proceed best-effort.
                 */
                if (val & DROP_ACTIVE)
                        ret = i915_gem_wait_for_idle(i915,
                                                     I915_WAIT_INTERRUPTIBLE |
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);

                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);

                mutex_unlock(&i915->drm.struct_mutex);
        }

        /* Recover a GPU we wedged above (or earlier) with a full reset. */
        if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
                i915_handle_error(i915, ALL_ENGINES, 0, NULL);

        /* Enter a reclaim context so the shrinkers behave as under memory pressure. */
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

        if (val & DROP_UNBOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

        if (val & DROP_SHRINK_ALL)
                i915_gem_shrink_all(i915);
        fs_reclaim_release(GFP_KERNEL);

        /*
         * Flush retirements and drain the idle worker until the GT truly
         * goes to sleep; loop in case new work keeps it awake transiently.
         */
        if (val & DROP_IDLE) {
                do {
                        if (READ_ONCE(i915->gt.active_requests))
                                flush_delayed_work(&i915->gt.retire_work);
                        drain_delayed_work(&i915->gt.idle_work);
                } while (READ_ONCE(i915->gt.awake));
        }

        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(i915);

        return 0;
}
3953
/* i915_gem_drop_caches: read shows DROP_ALL, write performs the drops. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
                        i915_drop_caches_get, i915_drop_caches_set,
                        "0x%08llx\n");
3957
/*
 * Report the current uncore (LLC) cache-sharing policy by decoding the
 * SNPCR field of GEN6_MBCUNIT_SNPCR. Only meaningful on gen6/7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;
        u32 snpcr = 0;

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        /* Hold a runtime-pm wakeref across the MMIO read. */
        with_intel_runtime_pm(dev_priv, wakeref)
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

        return 0;
}
3975
/*
 * Set the uncore cache-sharing policy (0-3) via a read-modify-write of the
 * SNPCR field in GEN6_MBCUNIT_SNPCR. Only valid on gen6/7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        /* The SNPCR field is only 2 bits wide. */
        if (val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
        with_intel_runtime_pm(dev_priv, wakeref) {
                u32 snpcr;

                /* Update the cache sharing policy here as well */
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
                snpcr &= ~GEN6_MBC_SNPCR_MASK;
                snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
                I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
        }

        return 0;
}
4001
/* i915_cache_sharing: read/write the gen6/7 uncore snoop policy (0-3). */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
4005
/*
 * Read back CHV power-gating signal registers and fill *sseu with the
 * currently-enabled slice/subslice/EU configuration. CHV has a single
 * slice with up to two subslices.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
                                          struct sseu_dev_info *sseu)
{
#define SS_MAX 2
        const int ss_max = SS_MAX;
        u32 sig1[SS_MAX], sig2[SS_MAX];
        int ss;

        sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
        sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
        sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
        sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

        for (ss = 0; ss < ss_max; ss++) {
                unsigned int eu_cnt;

                if (sig1[ss] & CHV_SS_PG_ENABLE)
                        /* skip disabled subslice */
                        continue;

                sseu->slice_mask = BIT(0);
                sseu->subslice_mask[0] |= BIT(ss);
                /*
                 * Each PG_ENABLE bit power-gates a pair of EUs: a gated
                 * pair contributes 0, an active pair contributes 2.
                 */
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
                sseu->eu_total += eu_cnt;
                sseu->eu_per_subslice = max_t(unsigned int,
                                              sseu->eu_per_subslice, eu_cnt);
        }
#undef SS_MAX
}
4038
/*
 * Read gen10+ power-gate ACK registers and fill *sseu with the enabled
 * slice/subslice/EU configuration. Two EU ACK registers per slice cover
 * subslices 0/1 and 2/3 respectively.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
                                     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                /*
                 * FIXME: Valid SS Mask respects the spec and read
                 * only valid bits for those registers, excluding reserved
                 * although this seems wrong because it would leave many
                 * subslices without ACK.
                 */
                s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
                        GEN10_PGCTL_VALID_SS_MASK(s);
                eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
                eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bit masks for even (SSA) and odd (SSB) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);
                sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                /* skip disabled subslice */
                                continue;

                        /* Each ACK bit represents an enabled pair of EUs. */
                        eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
                                               eu_mask[ss % 2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4094
/*
 * Read gen9 power-gate ACK registers and fill *sseu with the enabled
 * slice/subslice/EU configuration. BC parts report the static subslice
 * mask; LP parts derive it from the per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
                                    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
                eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
                eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bit masks for even (SSA) and odd (SSB) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);

                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (IS_GEN9_LP(dev_priv)) {
                                if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                        /* skip disabled subslice */
                                        continue;

                                sseu->subslice_mask[s] |= BIT(ss);
                        }

                        /* Each ACK bit represents an enabled pair of EUs. */
                        eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
                                               eu_mask[ss%2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4150
/*
 * Derive the BDW runtime SSEU status: the slice mask comes from
 * GEN8_GT_SLICE_INFO, while subslice and per-subslice EU counts are taken
 * from the static runtime info, then fused-off EUs are subtracted.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
                                         struct sseu_dev_info *sseu)
{
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
        int s;

        sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
                        RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);

                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

                        sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
}
4178
/*
 * Print one SSEU configuration to the seq_file. With is_available_info the
 * lines are labelled "Available" and extra static capability info (pooled
 * EU, power-gating support) is appended; otherwise they read "Enabled".
 */
static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
                                 const struct sseu_dev_info *sseu)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const char *type = is_available_info ? "Available" : "Enabled";
        int s;

        seq_printf(m, "  %s Slice Mask: %04x\n", type,
                   sseu->slice_mask);
        seq_printf(m, "  %s Slice Total: %u\n", type,
                   hweight8(sseu->slice_mask));
        seq_printf(m, "  %s Subslice Total: %u\n", type,
                   sseu_subslice_total(sseu));
        for (s = 0; s < fls(sseu->slice_mask); s++) {
                seq_printf(m, "  %s Slice%i subslices: %u\n", type,
                           s, hweight8(sseu->subslice_mask[s]));
        }
        seq_printf(m, "  %s EU Total: %u\n", type,
                   sseu->eu_total);
        seq_printf(m, "  %s EU Per Subslice: %u\n", type,
                   sseu->eu_per_subslice);

        /* The capability details below only make sense for "Available". */
        if (!is_available_info)
                return;

        seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
        if (HAS_POOLED_EU(dev_priv))
                seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);

        seq_printf(m, "  Has Slice Power Gating: %s\n",
                   yesno(sseu->has_slice_pg));
        seq_printf(m, "  Has Subslice Power Gating: %s\n",
                   yesno(sseu->has_subslice_pg));
        seq_printf(m, "  Has EU Power Gating: %s\n",
                   yesno(sseu->has_eu_pg));
}
4215
/*
 * debugfs i915_sseu_status: print the static ("Device Info") SSEU
 * configuration, then probe the hardware for the current ("Device Status")
 * configuration using the platform-specific reader. Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct sseu_dev_info sseu;
        intel_wakeref_t wakeref;

        if (INTEL_GEN(dev_priv) < 8)
                return -ENODEV;

        seq_puts(m, "SSEU Device Info\n");
        i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

        seq_puts(m, "SSEU Device Status\n");
        /* Start from a zeroed status, seeded with the static maxima. */
        memset(&sseu, 0, sizeof(sseu));
        sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
        sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
                RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

        /* Hardware reads below require the device to be awake. */
        with_intel_runtime_pm(dev_priv, wakeref) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_sseu_device_status(dev_priv, &sseu);
                else if (IS_BROADWELL(dev_priv))
                        broadwell_sseu_device_status(dev_priv, &sseu);
                else if (IS_GEN(dev_priv, 9))
                        gen9_sseu_device_status(dev_priv, &sseu);
                else if (INTEL_GEN(dev_priv) >= 10)
                        gen10_sseu_device_status(dev_priv, &sseu);
        }

        i915_print_sseu_info(m, false, &sseu);

        return 0;
}
4250
/*
 * Opening i915_forcewake_user grabs a runtime-pm wakeref and user
 * forcewake, keeping the GT awake while the file is held open.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;

        /* No forcewake hardware before gen6; open becomes a no-op. */
        if (INTEL_GEN(i915) < 6)
                return 0;

        /*
         * Stash the wakeref cookie in file->private_data so it can be
         * returned in i915_forcewake_release().
         */
        file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
        intel_uncore_forcewake_user_get(&i915->uncore);

        return 0;
}
4263
/*
 * Release the user forcewake and the runtime-pm wakeref that
 * i915_forcewake_open() stored in file->private_data.
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;

        /* Mirror the gen<6 no-op in open: nothing was acquired. */
        if (INTEL_GEN(i915) < 6)
                return 0;

        intel_uncore_forcewake_user_put(&i915->uncore);
        intel_runtime_pm_put(i915,
                             (intel_wakeref_t)(uintptr_t)file->private_data);

        return 0;
}
4277
/* debugfs fops: holding the file open holds forcewake (no read/write). */
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
4283
/*
 * Show the HPD storm threshold and whether a storm is currently being
 * handled (indicated by the pending re-enable work).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;

        /* Synchronize with everything first in case there's been an HPD
         * storm, but we haven't finished handling it in the kernel yet
         */
        synchronize_irq(dev_priv->drm.irq);
        flush_work(&dev_priv->hotplug.dig_port_work);
        flush_work(&dev_priv->hotplug.hotplug_work);

        seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
        seq_printf(m, "Detected: %s\n",
                   yesno(delayed_work_pending(&hotplug->reenable_work)));

        return 0;
}
4302
/*
 * Set the HPD storm detection threshold from debugfs. Accepts a decimal
 * threshold (0 disables detection) or "reset" to restore the default,
 * then clears the per-pin storm statistics.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
                                        const char __user *ubuf, size_t len,
                                        loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        unsigned int new_threshold;
        int i;
        char *newline;
        char tmp[16];

        /* Reserve one byte for the NUL terminator. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        if (strcmp(tmp, "reset") == 0)
                new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        else if (kstrtouint(tmp, 10, &new_threshold) != 0)
                return -EINVAL;

        if (new_threshold > 0)
                DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
                              new_threshold);
        else
                DRM_DEBUG_KMS("Disabling HPD storm detection\n");

        /* irq_lock protects the threshold and per-pin stats vs the irq path. */
        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_storm_threshold = new_threshold;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4351
/* seq_file open hook binding the show callback to the device. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4356
/* debugfs fops: read shows HPD storm state, write sets the threshold. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_storm_ctl_write
};
4365
/* Show whether short-pulse HPD storm detection is enabled. */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;

        seq_printf(m, "Enabled: %s\n",
                   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

        return 0;
}
4375
/* seq_file open hook binding the show callback to the device. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_hpd_short_storm_ctl_show,
                           inode->i_private);
}
4382
/*
 * Enable/disable short-pulse HPD storm detection from debugfs. Accepts a
 * boolean or "reset" (restores the platform default: enabled only when the
 * device lacks DP-MST), then clears the per-pin storm statistics.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
                                              const char __user *ubuf,
                                              size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        char *newline;
        char tmp[16];
        int i;
        bool new_state;

        /* Reserve one byte for the NUL terminator. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        /* Reset to the "default" state for this system */
        if (strcmp(tmp, "reset") == 0)
                new_state = !HAS_DP_MST(dev_priv);
        else if (kstrtobool(tmp, &new_state) != 0)
                return -EINVAL;

        DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
                      new_state ? "En" : "Dis");

        /* irq_lock protects the flag and per-pin stats vs the irq path. */
        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_short_storm_enabled = new_state;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4429
/* debugfs fops: read shows short-storm state, write enables/disables it. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_short_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_short_storm_ctl_write,
};
4438
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * eDP output. For each CRTC: take its modeset lock, wait for any pending
 * commit's hw_done, then walk the connectors attached to that CRTC and
 * toggle DRRS on the eDP ones. Gen7+ only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;

        if (INTEL_GEN(dev_priv) < 7)
                return -ENODEV;

        for_each_intel_crtc(dev, crtc) {
                struct drm_connector_list_iter conn_iter;
                struct intel_crtc_state *crtc_state;
                struct drm_connector *connector;
                struct drm_crtc_commit *commit;
                int ret;

                ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
                if (ret)
                        return ret;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                /* Nothing to do on inactive or non-DRRS CRTCs. */
                if (!crtc_state->base.active ||
                    !crtc_state->has_drrs)
                        goto out;

                /* Wait for any in-flight commit to finish touching the hw. */
                commit = crtc_state->base.commit;
                if (commit) {
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (ret)
                                goto out;
                }

                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_encoder *encoder;
                        struct intel_dp *intel_dp;

                        /* Only connectors driven by this CRTC. */
                        if (!(crtc_state->base.connector_mask &
                              drm_connector_mask(connector)))
                                continue;

                        encoder = intel_attached_encoder(connector);
                        if (encoder->type != INTEL_OUTPUT_EDP)
                                continue;

                        DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
                                                val ? "en" : "dis", val);

                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (val)
                                intel_edp_drrs_enable(intel_dp,
                                                      crtc_state);
                        else
                                intel_edp_drrs_disable(intel_dp,
                                                       crtc_state);
                }
                drm_connector_list_iter_end(&conn_iter);

out:
                /* Unlock before propagating any error from the loop body. */
                drm_modeset_unlock(&crtc->base.mutex);
                if (ret)
                        return ret;
        }

        return 0;
}
4506
4507 DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4508
/*
 * Writing a truthy value re-arms FIFO underrun reporting on every active
 * CRTC (reporting is disabled after the first underrun to avoid log spam).
 * Each CRTC's modeset lock is taken and any pending commit is waited upon
 * (hw_done then flip_done) before re-arming; finally FBC underrun state is
 * reset as well.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
                               const char __user *ubuf,
                               size_t cnt, loff_t *ppos)
{
        struct drm_i915_private *dev_priv = filp->private_data;
        struct intel_crtc *intel_crtc;
        struct drm_device *dev = &dev_priv->drm;
        int ret;
        bool reset;

        ret = kstrtobool_from_user(ubuf, cnt, &reset);
        if (ret)
                return ret;

        /* A falsy write is accepted but does nothing. */
        if (!reset)
                return cnt;

        for_each_intel_crtc(dev, intel_crtc) {
                struct drm_crtc_commit *commit;
                struct intel_crtc_state *crtc_state;

                ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
                if (ret)
                        return ret;

                crtc_state = to_intel_crtc_state(intel_crtc->base.state);
                commit = crtc_state->base.commit;
                if (commit) {
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (!ret)
                                ret = wait_for_completion_interruptible(&commit->flip_done);
                }

                if (!ret && crtc_state->base.active) {
                        DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
                                      pipe_name(intel_crtc->pipe));

                        intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
                }

                /* Always unlock before acting on a wait error. */
                drm_modeset_unlock(&intel_crtc->base.mutex);

                if (ret)
                        return ret;
        }

        ret = intel_fbc_reset_underrun(dev_priv);
        if (ret)
                return ret;

        return cnt;
}
4562
/* debugfs fops: write-only trigger for re-arming FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .write = i915_fifo_underrun_reset_write,
        .llseek = default_llseek,
};
4569
/*
 * Read-only informational debugfs nodes, registered in bulk via
 * drm_debugfs_create_files() from i915_debugfs_register().  Each entry is
 * {name, show callback, driver_features, data}.
 */
static const struct drm_info_list i915_debugfs_list[] = {
	{"i915_capabilities", i915_capabilities, 0},
	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_gtt", i915_gem_gtt_info, 0},
	{"i915_gem_stolen", i915_gem_stolen_list_info },
	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
	{"i915_gem_interrupt", i915_interrupt_info, 0},
	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
	{"i915_guc_info", i915_guc_info, 0},
	{"i915_guc_load_status", i915_guc_load_status_info, 0},
	{"i915_guc_log_dump", i915_guc_log_dump, 0},
	/*
	 * Same show callback as above; the (void *)1 data cookie presumably
	 * selects the load-error log — confirm in i915_guc_log_dump().
	 */
	{"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
	{"i915_huc_load_status", i915_huc_load_status_info, 0},
	{"i915_frequency_info", i915_frequency_info, 0},
	{"i915_hangcheck_info", i915_hangcheck_info, 0},
	{"i915_reset_info", i915_reset_info, 0},
	{"i915_drpc_info", i915_drpc_info, 0},
	{"i915_emon_status", i915_emon_status, 0},
	{"i915_ring_freq_table", i915_ring_freq_table, 0},
	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_ips_status", i915_ips_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
	{"i915_opregion", i915_opregion, 0},
	{"i915_vbt", i915_vbt, 0},
	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
	{"i915_context_status", i915_context_status, 0},
	{"i915_forcewake_domains", i915_forcewake_domains, 0},
	{"i915_swizzle_info", i915_swizzle_info, 0},
	{"i915_llc", i915_llc, 0},
	{"i915_edp_psr_status", i915_edp_psr_status, 0},
	{"i915_energy_uJ", i915_energy_uJ, 0},
	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
	{"i915_power_domain_info", i915_power_domain_info, 0},
	{"i915_dmc_info", i915_dmc_info, 0},
	{"i915_display_info", i915_display_info, 0},
	{"i915_engine_info", i915_engine_info, 0},
	{"i915_rcs_topology", i915_rcs_topology, 0},
	{"i915_shrinker_info", i915_shrinker_info, 0},
	{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
	{"i915_dp_mst_info", i915_dp_mst_info, 0},
	{"i915_wa_registers", i915_wa_registers, 0},
	{"i915_ddb_info", i915_ddb_info, 0},
	{"i915_sseu_status", i915_sseu_status, 0},
	{"i915_drrs_status", i915_drrs_status, 0},
	{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4619
/*
 * Debugfs nodes with custom file_operations (typically writable control
 * knobs), registered one by one in i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
	/* Error-state capture nodes only exist when capture support is built in. */
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4647
4648 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4649 {
4650         struct drm_minor *minor = dev_priv->drm.primary;
4651         struct dentry *ent;
4652         int i;
4653
4654         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4655                                   minor->debugfs_root, to_i915(minor->dev),
4656                                   &i915_forcewake_fops);
4657         if (!ent)
4658                 return -ENOMEM;
4659
4660         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4661                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4662                                           S_IRUGO | S_IWUSR,
4663                                           minor->debugfs_root,
4664                                           to_i915(minor->dev),
4665                                           i915_debugfs_files[i].fops);
4666                 if (!ent)
4667                         return -ENOMEM;
4668         }
4669
4670         return drm_debugfs_create_files(i915_debugfs_list,
4671                                         I915_DEBUGFS_ENTRIES,
4672                                         minor->debugfs_root, minor);
4673 }
4674
/*
 * Describes one range of DPCD registers to dump in i915_dpcd_show().
 * A range is either [offset, end] (inclusive) or [offset, offset+size-1].
 */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4685
/*
 * The set of DPCD register ranges dumped by the per-connector i915_dpcd
 * debugfs file.  Entries with .edp set are skipped for non-eDP connectors.
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4698
4699 static int i915_dpcd_show(struct seq_file *m, void *data)
4700 {
4701         struct drm_connector *connector = m->private;
4702         struct intel_dp *intel_dp =
4703                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4704         u8 buf[16];
4705         ssize_t err;
4706         int i;
4707
4708         if (connector->status != connector_status_connected)
4709                 return -ENODEV;
4710
4711         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4712                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4713                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4714
4715                 if (b->edp &&
4716                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4717                         continue;
4718
4719                 /* low tech for now */
4720                 if (WARN_ON(size > sizeof(buf)))
4721                         continue;
4722
4723                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4724                 if (err < 0)
4725                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4726                 else
4727                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4728         }
4729
4730         return 0;
4731 }
4732 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4733
4734 static int i915_panel_show(struct seq_file *m, void *data)
4735 {
4736         struct drm_connector *connector = m->private;
4737         struct intel_dp *intel_dp =
4738                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4739
4740         if (connector->status != connector_status_connected)
4741                 return -ENODEV;
4742
4743         seq_printf(m, "Panel power up delay: %d\n",
4744                    intel_dp->panel_power_up_delay);
4745         seq_printf(m, "Panel power down delay: %d\n",
4746                    intel_dp->panel_power_down_delay);
4747         seq_printf(m, "Backlight on delay: %d\n",
4748                    intel_dp->backlight_on_delay);
4749         seq_printf(m, "Backlight off delay: %d\n",
4750                    intel_dp->backlight_off_delay);
4751
4752         return 0;
4753 }
4754 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4755
/*
 * Report whether the sink attached to this connector is HDCP capable.
 *
 * Returns -ENODEV if the connector is disconnected, -EINVAL if the
 * connector has no HDCP support compiled in (no shim registered).
 */
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct intel_connector *intel_connector = to_intel_connector(connector);

	if (connector->status != connector_status_connected)
		return -ENODEV;

	/* Bail out unless HDCP is actually supported by this connector. */
	if (!intel_connector->hdcp.shim)
		return -EINVAL;

	seq_printf(m, "%s:%d HDCP version: ", connector->name,
		   connector->base.id);
	seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
		   "None" : "HDCP1.4");
	seq_puts(m, "\n");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4777
/*
 * Report the DSC/FEC capabilities and current state for a connected
 * DP/eDP connector.
 *
 * Takes the connection_mutex and the crtc mutex under a modeset acquire
 * context; on -EDEADLK it backs off and retries the whole sequence, which
 * is why the body is a do/while loop driven by try_again.  The statement
 * order inside the loop (lock, re-read state, lock crtc, print) must be
 * preserved — state may change between retries.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Deadlock: drop all locks and retry from scratch. */
			if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
				try_again = true;
				continue;
			}
			break;
		}
		/* crtc is only valid while connection_mutex is held. */
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		seq_printf(m, "Force_DSC_Enable: %s\n",
			   yesno(intel_dp->force_dsc_en));
		/* FEC is a DP-over-cable concept; not reported for eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4836
4837 static ssize_t i915_dsc_fec_support_write(struct file *file,
4838                                           const char __user *ubuf,
4839                                           size_t len, loff_t *offp)
4840 {
4841         bool dsc_enable = false;
4842         int ret;
4843         struct drm_connector *connector =
4844                 ((struct seq_file *)file->private_data)->private;
4845         struct intel_encoder *encoder = intel_attached_encoder(connector);
4846         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4847
4848         if (len == 0)
4849                 return 0;
4850
4851         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4852                          len);
4853
4854         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4855         if (ret < 0)
4856                 return ret;
4857
4858         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4859                          (dsc_enable) ? "true" : "false");
4860         intel_dp->force_dsc_en = dsc_enable;
4861
4862         *offp += len;
4863         return len;
4864 }
4865
4866 static int i915_dsc_fec_support_open(struct inode *inode,
4867                                      struct file *file)
4868 {
4869         return single_open(file, i915_dsc_fec_support_show,
4870                            inode->i_private);
4871 }
4872
/*
 * Read/write debugfs node: reads go through the seq_file show callback,
 * writes toggle the DSC force-enable flag.
 */
static const struct file_operations i915_dsc_fec_support_fops = {
	.owner = THIS_MODULE,
	.open = i915_dsc_fec_support_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_dsc_fec_support_write
};
4881
4882 /**
4883  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4884  * @connector: pointer to a registered drm_connector
4885  *
4886  * Cleanup will be done by drm_connector_unregister() through a call to
4887  * drm_debugfs_connector_remove().
4888  *
4889  * Returns 0 on success, negative error codes on error.
4890  */
4891 int i915_debugfs_connector_add(struct drm_connector *connector)
4892 {
4893         struct dentry *root = connector->debugfs_entry;
4894         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4895
4896         /* The connector must have been registered beforehands. */
4897         if (!root)
4898                 return -ENODEV;
4899
4900         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4901             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4902                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4903                                     connector, &i915_dpcd_fops);
4904
4905         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4906                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4907                                     connector, &i915_panel_fops);
4908                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4909                                     connector, &i915_psr_sink_status_fops);
4910         }
4911
4912         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4913             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4914             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4915                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4916                                     connector, &i915_hdcp_sink_capability_fops);
4917         }
4918
4919         if (INTEL_GEN(dev_priv) >= 10 &&
4920             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4921              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4922                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4923                                     connector, &i915_dsc_fec_support_fops);
4924
4925         return 0;
4926 }