]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/gpu/drm/i915/i915_debugfs.c
drm/i915: take a reference to uncore in the engine and use it
[linux.git] / drivers / gpu / drm / i915 / i915_debugfs.c
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/sort.h>
30 #include <linux/sched/mm.h>
31 #include <drm/drm_debugfs.h>
32 #include <drm/drm_fourcc.h>
33 #include "intel_drv.h"
34 #include "intel_guc_submission.h"
35
36 #include "i915_reset.h"
37
/* Resolve a debugfs node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
42
/*
 * debugfs: dump the static device capabilities (gen, platform, PCH type),
 * the device info flag/runtime tables, driver caps, and a snapshot of the
 * current module parameters.
 */
static int i915_capabilities(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	struct drm_printer p = drm_seq_file_printer(m);

	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));

	intel_device_info_dump_flags(info, &p);
	intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	intel_driver_caps_print(&dev_priv->caps, &p);

	/* Hold the param lock so the modparam dump is a consistent snapshot. */
	kernel_param_lock(THIS_MODULE);
	i915_params_dump(&i915_modparams, &p);
	kernel_param_unlock(THIS_MODULE);

	return 0;
}
63
/* '*' while the object is still active on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
68
69 static char get_pin_flag(struct drm_i915_gem_object *obj)
70 {
71         return obj->pin_global ? 'p' : ' ';
72 }
73
74 static char get_tiling_flag(struct drm_i915_gem_object *obj)
75 {
76         switch (i915_gem_object_get_tiling(obj)) {
77         default:
78         case I915_TILING_NONE: return ' ';
79         case I915_TILING_X: return 'X';
80         case I915_TILING_Y: return 'Y';
81         }
82 }
83
84 static char get_global_flag(struct drm_i915_gem_object *obj)
85 {
86         return obj->userfault_count ? 'g' : ' ';
87 }
88
89 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
90 {
91         return obj->mm.mapping ? 'M' : ' ';
92 }
93
94 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
95 {
96         u64 size = 0;
97         struct i915_vma *vma;
98
99         for_each_ggtt_vma(vma, obj) {
100                 if (drm_mm_node_allocated(&vma->node))
101                         size += vma->node.size;
102         }
103
104         return size;
105 }
106
107 static const char *
108 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
109 {
110         size_t x = 0;
111
112         switch (page_sizes) {
113         case 0:
114                 return "";
115         case I915_GTT_PAGE_SIZE_4K:
116                 return "4K";
117         case I915_GTT_PAGE_SIZE_64K:
118                 return "64K";
119         case I915_GTT_PAGE_SIZE_2M:
120                 return "2M";
121         default:
122                 if (!buf)
123                         return "M";
124
125                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
126                         x += snprintf(buf + x, len - x, "2M, ");
127                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
128                         x += snprintf(buf + x, len - x, "64K, ");
129                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
130                         x += snprintf(buf + x, len - x, "4K, ");
131                 buf[x-2] = '\0';
132
133                 return buf;
134         }
135 }
136
/*
 * Print a detailed one-line (plus per-vma continuation) description of a
 * GEM object: state flags, size, read/write domains, cache level, flink
 * name, pin count, every allocated vma with its GGTT view details and
 * fence, stolen offset, last write engine and frontbuffer bits.
 *
 * Caller must hold dev->struct_mutex (asserted below) so the vma list is
 * stable while we walk it.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count pinned vmas across all address spaces. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe only vmas that actually have GTT space allocated. */
	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_active_request_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
224
225 static int obj_rank_by_stolen(const void *A, const void *B)
226 {
227         const struct drm_i915_gem_object *a =
228                 *(const struct drm_i915_gem_object **)A;
229         const struct drm_i915_gem_object *b =
230                 *(const struct drm_i915_gem_object **)B;
231
232         if (a->stolen->start < b->stolen->start)
233                 return -1;
234         if (a->stolen->start > b->stolen->start)
235                 return 1;
236         return 0;
237 }
238
/*
 * debugfs: list every GEM object backed by stolen memory, sorted by its
 * offset within the stolen area, followed by totals.
 *
 * The object lists are snapshotted into a local array under mm.obj_lock
 * (the array is sized before taking any lock, bounding the walk even if
 * objects are created concurrently); the objects are only described
 * after the spinlock is dropped, under struct_mutex.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects occupy no GTT space; only their size is counted. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
301
/* Accumulator for GEM object statistics gathered by per_file_stats(). */
struct file_stats {
	struct i915_address_space *vm; /* ppgtt used to filter non-GGTT vmas */
	unsigned long count;           /* number of objects seen */
	u64 total, unbound;            /* bytes total / with no bindings */
	u64 global, shared;            /* bytes in GGTT / flink- or dmabuf-shared */
	u64 active, inactive;          /* bytes in active / idle vmas */
	u64 closed;                    /* bytes in closed vmas */
};
310
/*
 * Fold one GEM object into a struct file_stats (@data); usable directly
 * as an idr_for_each() callback (@id is unused).
 *
 * Accounts the object's size as total, unbound (no bindings) and shared
 * (flink-named or exported via dma-buf); then, for each vma with GTT
 * space allocated, as global (GGTT), active/inactive and closed.
 * Non-GGTT vmas belonging to an address space other than stats->vm are
 * skipped entirely.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	list_for_each_entry(vma, &obj->vma.list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			/* Only count vmas in the address space we track. */
			if (vma->vm != stats->vm)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;

		if (i915_vma_is_closed(vma))
			stats->closed += vma->node.size;
	}

	return 0;
}
348
/*
 * Emit one summary line for a struct file_stats accumulator, skipping
 * accumulators that saw no objects.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound, \
			   stats.closed); \
} while (0)
362
363 static void print_batch_pool_stats(struct seq_file *m,
364                                    struct drm_i915_private *dev_priv)
365 {
366         struct drm_i915_gem_object *obj;
367         struct intel_engine_cs *engine;
368         struct file_stats stats = {};
369         enum intel_engine_id id;
370         int j;
371
372         for_each_engine(engine, dev_priv, id) {
373                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
374                         list_for_each_entry(obj,
375                                             &engine->batch_pool.cache_list[j],
376                                             batch_pool_link)
377                                 per_file_stats(0, obj, &stats);
378                 }
379         }
380
381         print_file_stats(m, "[k]batch pool", stats);
382 }
383
/*
 * Print per-context memory statistics.  Kernel-owned allocations (context
 * state and ring buffers) are accumulated into a single "[k]contexts"
 * line, while objects owned by a userspace file are reported per task,
 * named after the process looked up from the context's pid under RCU.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *i915)
{
	struct file_stats kstats = {};
	struct i915_gem_context *ctx;

	list_for_each_entry(ctx, &i915->contexts.list, link) {
		struct intel_context *ce;

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			if (ce->state)
				per_file_stats(0, ce->state->obj, &kstats);
			if (ce->ring)
				per_file_stats(0, ce->ring->vma->obj, &kstats);
		}

		if (!IS_ERR_OR_NULL(ctx->file_priv)) {
			struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
			struct drm_file *file = ctx->file_priv->file;
			struct task_struct *task;
			char name[80];

			spin_lock(&file->table_lock);
			idr_for_each(&file->object_idr, per_file_stats, &stats);
			spin_unlock(&file->table_lock);

			rcu_read_lock();
			/* Prefer the context's pid; fall back to the drm file's. */
			task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
			snprintf(name, sizeof(name), "%s",
				 task ? task->comm : "<unknown>");
			rcu_read_unlock();

			print_file_stats(m, name, stats);
		}
	}

	print_file_stats(m, "[k]contexts", kstats);
}
422
/*
 * debugfs: global GEM memory overview.  Prints totals for unbound,
 * bound, purgeable, mapped, huge-paged and display-pinned objects, the
 * GGTT size and supported page sizes, followed by batch-pool and
 * per-context breakdowns.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	char buf[80];
	int ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* Walk both object lists under obj_lock to get consistent counts. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reset size/count for the bound list; purgeable et al accumulate. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');

	/* The batch-pool and context walks require struct_mutex. */
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	print_batch_pool_stats(m, dev_priv);
	print_context_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
526
527 static int i915_gem_gtt_info(struct seq_file *m, void *data)
528 {
529         struct drm_info_node *node = m->private;
530         struct drm_i915_private *dev_priv = node_to_i915(node);
531         struct drm_device *dev = &dev_priv->drm;
532         struct drm_i915_gem_object **objects;
533         struct drm_i915_gem_object *obj;
534         u64 total_obj_size, total_gtt_size;
535         unsigned long nobject, n;
536         int count, ret;
537
538         nobject = READ_ONCE(dev_priv->mm.object_count);
539         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
540         if (!objects)
541                 return -ENOMEM;
542
543         ret = mutex_lock_interruptible(&dev->struct_mutex);
544         if (ret)
545                 return ret;
546
547         count = 0;
548         spin_lock(&dev_priv->mm.obj_lock);
549         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
550                 objects[count++] = obj;
551                 if (count == nobject)
552                         break;
553         }
554         spin_unlock(&dev_priv->mm.obj_lock);
555
556         total_obj_size = total_gtt_size = 0;
557         for (n = 0;  n < count; n++) {
558                 obj = objects[n];
559
560                 seq_puts(m, "   ");
561                 describe_obj(m, obj);
562                 seq_putc(m, '\n');
563                 total_obj_size += obj->base.size;
564                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
565         }
566
567         mutex_unlock(&dev->struct_mutex);
568
569         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
570                    count, total_obj_size, total_gtt_size);
571         kvfree(objects);
572
573         return 0;
574 }
575
/*
 * debugfs: for every engine, report how many objects sit in each
 * batch-pool cache list and describe each one, then a grand total.
 * struct_mutex protects the walk and is required by describe_obj().
 */
static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int total = 0;
	int ret, j;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	for_each_engine(engine, dev_priv, id) {
		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
			int count;

			/* First pass: count, so the header precedes the list. */
			count = 0;
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link)
				count++;
			seq_printf(m, "%s cache[%d]: %d objects\n",
				   engine->name, j, count);

			/* Second pass: describe each object. */
			list_for_each_entry(obj,
					    &engine->batch_pool.cache_list[j],
					    batch_pool_link) {
				seq_puts(m, "   ");
				describe_obj(m, obj);
				seq_putc(m, '\n');
			}

			total += count;
		}
	}

	seq_printf(m, "total: %d\n", total);

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
620
/*
 * Dump the gen8+ display interrupt registers: per-pipe IMR/IIR/IER
 * (skipping pipes whose power domain cannot be acquired, so we never
 * read registers in an unpowered well), then the port, misc and PCU
 * interrupt mask/identity/enable registers.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;
		intel_wakeref_t wakeref;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		wakeref = intel_display_power_get_if_enabled(dev_priv,
							     power_domain);
		if (!wakeref) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain, wakeref);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
672
673 static int i915_interrupt_info(struct seq_file *m, void *data)
674 {
675         struct drm_i915_private *dev_priv = node_to_i915(m->private);
676         struct intel_engine_cs *engine;
677         enum intel_engine_id id;
678         intel_wakeref_t wakeref;
679         int i, pipe;
680
681         wakeref = intel_runtime_pm_get(dev_priv);
682
683         if (IS_CHERRYVIEW(dev_priv)) {
684                 intel_wakeref_t pref;
685
686                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
687                            I915_READ(GEN8_MASTER_IRQ));
688
689                 seq_printf(m, "Display IER:\t%08x\n",
690                            I915_READ(VLV_IER));
691                 seq_printf(m, "Display IIR:\t%08x\n",
692                            I915_READ(VLV_IIR));
693                 seq_printf(m, "Display IIR_RW:\t%08x\n",
694                            I915_READ(VLV_IIR_RW));
695                 seq_printf(m, "Display IMR:\t%08x\n",
696                            I915_READ(VLV_IMR));
697                 for_each_pipe(dev_priv, pipe) {
698                         enum intel_display_power_domain power_domain;
699
700                         power_domain = POWER_DOMAIN_PIPE(pipe);
701                         pref = intel_display_power_get_if_enabled(dev_priv,
702                                                                   power_domain);
703                         if (!pref) {
704                                 seq_printf(m, "Pipe %c power disabled\n",
705                                            pipe_name(pipe));
706                                 continue;
707                         }
708
709                         seq_printf(m, "Pipe %c stat:\t%08x\n",
710                                    pipe_name(pipe),
711                                    I915_READ(PIPESTAT(pipe)));
712
713                         intel_display_power_put(dev_priv, power_domain, pref);
714                 }
715
716                 pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
717                 seq_printf(m, "Port hotplug:\t%08x\n",
718                            I915_READ(PORT_HOTPLUG_EN));
719                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
720                            I915_READ(VLV_DPFLIPSTAT));
721                 seq_printf(m, "DPINVGTT:\t%08x\n",
722                            I915_READ(DPINVGTT));
723                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
724
725                 for (i = 0; i < 4; i++) {
726                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
727                                    i, I915_READ(GEN8_GT_IMR(i)));
728                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
729                                    i, I915_READ(GEN8_GT_IIR(i)));
730                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
731                                    i, I915_READ(GEN8_GT_IER(i)));
732                 }
733
734                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
735                            I915_READ(GEN8_PCU_IMR));
736                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
737                            I915_READ(GEN8_PCU_IIR));
738                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
739                            I915_READ(GEN8_PCU_IER));
740         } else if (INTEL_GEN(dev_priv) >= 11) {
741                 seq_printf(m, "Master Interrupt Control:  %08x\n",
742                            I915_READ(GEN11_GFX_MSTR_IRQ));
743
744                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
745                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
746                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
747                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
748                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
749                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
750                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
751                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
752                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
753                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
754                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
755                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
756
757                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
758                            I915_READ(GEN11_DISPLAY_INT_CTL));
759
760                 gen8_display_interrupt_info(m);
761         } else if (INTEL_GEN(dev_priv) >= 8) {
762                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
763                            I915_READ(GEN8_MASTER_IRQ));
764
765                 for (i = 0; i < 4; i++) {
766                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
767                                    i, I915_READ(GEN8_GT_IMR(i)));
768                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
769                                    i, I915_READ(GEN8_GT_IIR(i)));
770                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
771                                    i, I915_READ(GEN8_GT_IER(i)));
772                 }
773
774                 gen8_display_interrupt_info(m);
775         } else if (IS_VALLEYVIEW(dev_priv)) {
776                 seq_printf(m, "Display IER:\t%08x\n",
777                            I915_READ(VLV_IER));
778                 seq_printf(m, "Display IIR:\t%08x\n",
779                            I915_READ(VLV_IIR));
780                 seq_printf(m, "Display IIR_RW:\t%08x\n",
781                            I915_READ(VLV_IIR_RW));
782                 seq_printf(m, "Display IMR:\t%08x\n",
783                            I915_READ(VLV_IMR));
784                 for_each_pipe(dev_priv, pipe) {
785                         enum intel_display_power_domain power_domain;
786                         intel_wakeref_t pref;
787
788                         power_domain = POWER_DOMAIN_PIPE(pipe);
789                         pref = intel_display_power_get_if_enabled(dev_priv,
790                                                                   power_domain);
791                         if (!pref) {
792                                 seq_printf(m, "Pipe %c power disabled\n",
793                                            pipe_name(pipe));
794                                 continue;
795                         }
796
797                         seq_printf(m, "Pipe %c stat:\t%08x\n",
798                                    pipe_name(pipe),
799                                    I915_READ(PIPESTAT(pipe)));
800                         intel_display_power_put(dev_priv, power_domain, pref);
801                 }
802
803                 seq_printf(m, "Master IER:\t%08x\n",
804                            I915_READ(VLV_MASTER_IER));
805
806                 seq_printf(m, "Render IER:\t%08x\n",
807                            I915_READ(GTIER));
808                 seq_printf(m, "Render IIR:\t%08x\n",
809                            I915_READ(GTIIR));
810                 seq_printf(m, "Render IMR:\t%08x\n",
811                            I915_READ(GTIMR));
812
813                 seq_printf(m, "PM IER:\t\t%08x\n",
814                            I915_READ(GEN6_PMIER));
815                 seq_printf(m, "PM IIR:\t\t%08x\n",
816                            I915_READ(GEN6_PMIIR));
817                 seq_printf(m, "PM IMR:\t\t%08x\n",
818                            I915_READ(GEN6_PMIMR));
819
820                 seq_printf(m, "Port hotplug:\t%08x\n",
821                            I915_READ(PORT_HOTPLUG_EN));
822                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
823                            I915_READ(VLV_DPFLIPSTAT));
824                 seq_printf(m, "DPINVGTT:\t%08x\n",
825                            I915_READ(DPINVGTT));
826
827         } else if (!HAS_PCH_SPLIT(dev_priv)) {
828                 seq_printf(m, "Interrupt enable:    %08x\n",
829                            I915_READ(IER));
830                 seq_printf(m, "Interrupt identity:  %08x\n",
831                            I915_READ(IIR));
832                 seq_printf(m, "Interrupt mask:      %08x\n",
833                            I915_READ(IMR));
834                 for_each_pipe(dev_priv, pipe)
835                         seq_printf(m, "Pipe %c stat:         %08x\n",
836                                    pipe_name(pipe),
837                                    I915_READ(PIPESTAT(pipe)));
838         } else {
839                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
840                            I915_READ(DEIER));
841                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
842                            I915_READ(DEIIR));
843                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
844                            I915_READ(DEIMR));
845                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
846                            I915_READ(SDEIER));
847                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
848                            I915_READ(SDEIIR));
849                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
850                            I915_READ(SDEIMR));
851                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
852                            I915_READ(GTIER));
853                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
854                            I915_READ(GTIIR));
855                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
856                            I915_READ(GTIMR));
857         }
858
859         if (INTEL_GEN(dev_priv) >= 11) {
860                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
861                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
862                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
863                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
864                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
865                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
866                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
867                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
868                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
869                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
870                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
871                            I915_READ(GEN11_GUC_SG_INTR_MASK));
872                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
873                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
874                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
875                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
876                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
877                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
878
879         } else if (INTEL_GEN(dev_priv) >= 6) {
880                 for_each_engine(engine, dev_priv, id) {
881                         seq_printf(m,
882                                    "Graphics Interrupt mask (%s):       %08x\n",
883                                    engine->name, ENGINE_READ(engine, RING_IMR));
884                 }
885         }
886
887         intel_runtime_pm_put(dev_priv, wakeref);
888
889         return 0;
890 }
891
892 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
893 {
894         struct drm_i915_private *dev_priv = node_to_i915(m->private);
895         struct drm_device *dev = &dev_priv->drm;
896         int i, ret;
897
898         ret = mutex_lock_interruptible(&dev->struct_mutex);
899         if (ret)
900                 return ret;
901
902         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
903         for (i = 0; i < dev_priv->num_fence_regs; i++) {
904                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
905
906                 seq_printf(m, "Fence %d, pin count = %d, object = ",
907                            i, dev_priv->fence_regs[i].pin_count);
908                 if (!vma)
909                         seq_puts(m, "unused");
910                 else
911                         describe_obj(m, vma->obj);
912                 seq_putc(m, '\n');
913         }
914
915         mutex_unlock(&dev->struct_mutex);
916         return 0;
917 }
918
919 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
920 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
921                               size_t count, loff_t *pos)
922 {
923         struct i915_gpu_state *error;
924         ssize_t ret;
925         void *buf;
926
927         error = file->private_data;
928         if (!error)
929                 return 0;
930
931         /* Bounce buffer required because of kernfs __user API convenience. */
932         buf = kmalloc(count, GFP_KERNEL);
933         if (!buf)
934                 return -ENOMEM;
935
936         ret = i915_gpu_state_copy_to_buffer(error, buf, *pos, count);
937         if (ret <= 0)
938                 goto out;
939
940         if (!copy_to_user(ubuf, buf, ret))
941                 *pos += ret;
942         else
943                 ret = -EFAULT;
944
945 out:
946         kfree(buf);
947         return ret;
948 }
949
950 static int gpu_state_release(struct inode *inode, struct file *file)
951 {
952         i915_gpu_state_put(file->private_data);
953         return 0;
954 }
955
956 static int i915_gpu_info_open(struct inode *inode, struct file *file)
957 {
958         struct drm_i915_private *i915 = inode->i_private;
959         struct i915_gpu_state *gpu;
960         intel_wakeref_t wakeref;
961
962         gpu = NULL;
963         with_intel_runtime_pm(i915, wakeref)
964                 gpu = i915_capture_gpu_state(i915);
965         if (IS_ERR(gpu))
966                 return PTR_ERR(gpu);
967
968         file->private_data = gpu;
969         return 0;
970 }
971
/*
 * debugfs "i915_gpu_info": snapshot of the current GPU state, captured
 * on demand at open() and released with the file.
 */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
979
980 static ssize_t
981 i915_error_state_write(struct file *filp,
982                        const char __user *ubuf,
983                        size_t cnt,
984                        loff_t *ppos)
985 {
986         struct i915_gpu_state *error = filp->private_data;
987
988         if (!error)
989                 return 0;
990
991         DRM_DEBUG_DRIVER("Resetting error state\n");
992         i915_reset_error_state(error->i915);
993
994         return cnt;
995 }
996
997 static int i915_error_state_open(struct inode *inode, struct file *file)
998 {
999         struct i915_gpu_state *error;
1000
1001         error = i915_first_error_state(inode->i_private);
1002         if (IS_ERR(error))
1003                 return PTR_ERR(error);
1004
1005         file->private_data  = error;
1006         return 0;
1007 }
1008
/*
 * debugfs "i915_error_state": reads stream the first captured error
 * state; any write clears it so a new hang can be captured.
 */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1017 #endif
1018
/*
 * Dump the GPU frequency / RPS (render p-state) status for the current
 * platform generation. Three hardware paths: ILK (gen5) MEMSTAT, the
 * VLV/CHV punit, and the gen6+ RPS register block. The MMIO reads are
 * deliberately grouped and ordered; do not reorder them casually.
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(dev_priv);

	if (IS_GEN(dev_priv, 5)) {
		/* Ironlake: p-state info lives in the MEMSTAT registers. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		/* VLV/CHV: frequency status comes from the punit sideband. */
		u32 rpmodectl, freq_sts;

		/* pcu_lock serialises the punit sideband access. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		/* gen6+: full RPS register dump. */
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class LP parts keep the caps in different regs. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

		/* Requested frequency field position varies per gen. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		/* PM interrupt registers moved around across generations. */
		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		/* gen9+ widened the p-state ratio field to 9 bits. */
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN/RP1/RP0 cap fields are byte-swapped on GEN9_LP. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1241
1242 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1243                                struct seq_file *m,
1244                                struct intel_instdone *instdone)
1245 {
1246         int slice;
1247         int subslice;
1248
1249         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1250                    instdone->instdone);
1251
1252         if (INTEL_GEN(dev_priv) <= 3)
1253                 return;
1254
1255         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1256                    instdone->slice_common);
1257
1258         if (INTEL_GEN(dev_priv) <= 6)
1259                 return;
1260
1261         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1262                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1263                            slice, subslice, instdone->sampler[slice][subslice]);
1264
1265         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1266                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1267                            slice, subslice, instdone->row[slice][subslice]);
1268 }
1269
/*
 * Report the hangcheck state: global reset flags, whether the hangcheck
 * worker is armed, and per-engine progress (seqno/ACTHD) compared with
 * the values last recorded by hangcheck. The live hardware values are
 * sampled once up front under runtime pm, then printed lock-free.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	intel_wakeref_t wakeref;
	enum intel_engine_id id;

	seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tWedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "\tDevice (global) reset in progress\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample the live hardware state while holding the device awake. */
	with_intel_runtime_pm(dev_priv, wakeref) {
		for_each_engine(engine, dev_priv, id) {
			acthd[id] = intel_engine_get_active_head(engine);
			seqno[id] = intel_engine_get_hangcheck_seqno(engine);
		}

		/* INSTDONE is only captured for the render engine (RCS0). */
		intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
	}

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies))
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s:\n", engine->name);
		/* Recorded-by-hangcheck values vs the live sample above. */
		seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
			   engine->hangcheck.last_seqno,
			   seqno[id],
			   engine->hangcheck.next_seqno,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);

		if (engine->id == RCS0) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1338
1339 static int i915_reset_info(struct seq_file *m, void *unused)
1340 {
1341         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1342         struct i915_gpu_error *error = &dev_priv->gpu_error;
1343         struct intel_engine_cs *engine;
1344         enum intel_engine_id id;
1345
1346         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1347
1348         for_each_engine(engine, dev_priv, id) {
1349                 seq_printf(m, "%s = %u\n", engine->name,
1350                            i915_reset_engine_count(error, engine));
1351         }
1352
1353         return 0;
1354 }
1355
1356 static int ironlake_drpc_info(struct seq_file *m)
1357 {
1358         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1359         u32 rgvmodectl, rstdbyctl;
1360         u16 crstandvid;
1361
1362         rgvmodectl = I915_READ(MEMMODECTL);
1363         rstdbyctl = I915_READ(RSTDBYCTL);
1364         crstandvid = I915_READ16(CRSTANDVID);
1365
1366         seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
1367         seq_printf(m, "Boost freq: %d\n",
1368                    (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
1369                    MEMMODE_BOOST_FREQ_SHIFT);
1370         seq_printf(m, "HW control enabled: %s\n",
1371                    yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
1372         seq_printf(m, "SW control enabled: %s\n",
1373                    yesno(rgvmodectl & MEMMODE_SWMODE_EN));
1374         seq_printf(m, "Gated voltage change: %s\n",
1375                    yesno(rgvmodectl & MEMMODE_RCLK_GATE));
1376         seq_printf(m, "Starting frequency: P%d\n",
1377                    (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
1378         seq_printf(m, "Max P-state: P%d\n",
1379                    (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
1380         seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
1381         seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
1382         seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
1383         seq_printf(m, "Render standby enabled: %s\n",
1384                    yesno(!(rstdbyctl & RCX_SW_EXIT)));
1385         seq_puts(m, "Current RS state: ");
1386         switch (rstdbyctl & RSX_STATUS_MASK) {
1387         case RSX_STATUS_ON:
1388                 seq_puts(m, "on\n");
1389                 break;
1390         case RSX_STATUS_RC1:
1391                 seq_puts(m, "RC1\n");
1392                 break;
1393         case RSX_STATUS_RC1E:
1394                 seq_puts(m, "RC1E\n");
1395                 break;
1396         case RSX_STATUS_RS1:
1397                 seq_puts(m, "RS1\n");
1398                 break;
1399         case RSX_STATUS_RS2:
1400                 seq_puts(m, "RS2 (RC6)\n");
1401                 break;
1402         case RSX_STATUS_RS3:
1403                 seq_puts(m, "RC3 (RC6+)\n");
1404                 break;
1405         default:
1406                 seq_puts(m, "unknown\n");
1407                 break;
1408         }
1409
1410         return 0;
1411 }
1412
1413 static int i915_forcewake_domains(struct seq_file *m, void *data)
1414 {
1415         struct drm_i915_private *i915 = node_to_i915(m->private);
1416         struct intel_uncore *uncore = &i915->uncore;
1417         struct intel_uncore_forcewake_domain *fw_domain;
1418         unsigned int tmp;
1419
1420         seq_printf(m, "user.bypass_count = %u\n",
1421                    uncore->user_forcewake.count);
1422
1423         for_each_fw_domain(fw_domain, uncore, tmp)
1424                 seq_printf(m, "%s.wake_count = %u\n",
1425                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1426                            READ_ONCE(fw_domain->wake_count));
1427
1428         return 0;
1429 }
1430
1431 static void print_rc6_res(struct seq_file *m,
1432                           const char *title,
1433                           const i915_reg_t reg)
1434 {
1435         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1436
1437         seq_printf(m, "%s %u (%llu us)\n",
1438                    title, I915_READ(reg),
1439                    intel_rc6_residency_us(dev_priv, reg));
1440 }
1441
/*
 * Valleyview/Cherryview flavour of the DRPC (render C-state) dump:
 * RC6 enablement, render/media power-well status and the RC6 residency
 * counters since boot.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* RC6 counts as enabled if either timeout mode or EI mode is set. */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	/* Append the forcewake domain state to every DRPC dump. */
	return i915_forcewake_domains(m, NULL);
}
1463
/*
 * Gen6+ DRPC dump: current RC state, RC6 enable bits, gen9 power-gating
 * state, RC6 residency counters and (gen6/7 only) RC6 voltage IDs.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/*
	 * Raw (_FW) read skips the tracepoint, so emit the trace event
	 * manually to keep register tracing complete.
	 */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	if (INTEL_GEN(dev_priv) <= 7) {
		/* RC6 voltage IDs are only retrievable via pcode on gen6/7. */
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state field of GEN6_GT_CORE_STATUS. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	if (INTEL_GEN(dev_priv) <= 7) {
		/* Three 8-bit VIDs packed into rc6vids: RC6, RC6+, RC6++. */
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	/* Append the forcewake domain state to every DRPC dump. */
	return i915_forcewake_domains(m, NULL);
}
1551
1552 static int i915_drpc_info(struct seq_file *m, void *unused)
1553 {
1554         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1555         intel_wakeref_t wakeref;
1556         int err = -ENODEV;
1557
1558         with_intel_runtime_pm(dev_priv, wakeref) {
1559                 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1560                         err = vlv_drpc_info(m);
1561                 else if (INTEL_GEN(dev_priv) >= 6)
1562                         err = gen6_drpc_info(m);
1563                 else
1564                         err = ironlake_drpc_info(m);
1565         }
1566
1567         return err;
1568 }
1569
1570 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1571 {
1572         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1573
1574         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1575                    dev_priv->fb_tracking.busy_bits);
1576
1577         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1578                    dev_priv->fb_tracking.flip_bits);
1579
1580         return 0;
1581 }
1582
1583 static int i915_fbc_status(struct seq_file *m, void *unused)
1584 {
1585         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1586         struct intel_fbc *fbc = &dev_priv->fbc;
1587         intel_wakeref_t wakeref;
1588
1589         if (!HAS_FBC(dev_priv))
1590                 return -ENODEV;
1591
1592         wakeref = intel_runtime_pm_get(dev_priv);
1593         mutex_lock(&fbc->lock);
1594
1595         if (intel_fbc_is_active(dev_priv))
1596                 seq_puts(m, "FBC enabled\n");
1597         else
1598                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1599
1600         if (intel_fbc_is_active(dev_priv)) {
1601                 u32 mask;
1602
1603                 if (INTEL_GEN(dev_priv) >= 8)
1604                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1605                 else if (INTEL_GEN(dev_priv) >= 7)
1606                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1607                 else if (INTEL_GEN(dev_priv) >= 5)
1608                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1609                 else if (IS_G4X(dev_priv))
1610                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1611                 else
1612                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1613                                                         FBC_STAT_COMPRESSED);
1614
1615                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1616         }
1617
1618         mutex_unlock(&fbc->lock);
1619         intel_runtime_pm_put(dev_priv, wakeref);
1620
1621         return 0;
1622 }
1623
1624 static int i915_fbc_false_color_get(void *data, u64 *val)
1625 {
1626         struct drm_i915_private *dev_priv = data;
1627
1628         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1629                 return -ENODEV;
1630
1631         *val = dev_priv->fbc.false_color;
1632
1633         return 0;
1634 }
1635
1636 static int i915_fbc_false_color_set(void *data, u64 val)
1637 {
1638         struct drm_i915_private *dev_priv = data;
1639         u32 reg;
1640
1641         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1642                 return -ENODEV;
1643
1644         mutex_lock(&dev_priv->fbc.lock);
1645
1646         reg = I915_READ(ILK_DPFC_CONTROL);
1647         dev_priv->fbc.false_color = val;
1648
1649         I915_WRITE(ILK_DPFC_CONTROL, val ?
1650                    (reg | FBC_CTL_FALSE_COLOR) :
1651                    (reg & ~FBC_CTL_FALSE_COLOR));
1652
1653         mutex_unlock(&dev_priv->fbc.lock);
1654         return 0;
1655 }
1656
/* debugfs file ops for the i915_fbc_false_color knob ("%llu" format). */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1660
1661 static int i915_ips_status(struct seq_file *m, void *unused)
1662 {
1663         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1664         intel_wakeref_t wakeref;
1665
1666         if (!HAS_IPS(dev_priv))
1667                 return -ENODEV;
1668
1669         wakeref = intel_runtime_pm_get(dev_priv);
1670
1671         seq_printf(m, "Enabled by kernel parameter: %s\n",
1672                    yesno(i915_modparams.enable_ips));
1673
1674         if (INTEL_GEN(dev_priv) >= 8) {
1675                 seq_puts(m, "Currently: unknown\n");
1676         } else {
1677                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1678                         seq_puts(m, "Currently: enabled\n");
1679                 else
1680                         seq_puts(m, "Currently: disabled\n");
1681         }
1682
1683         intel_runtime_pm_put(dev_priv, wakeref);
1684
1685         return 0;
1686 }
1687
/*
 * Report whether panel self-refresh is enabled, probing the
 * platform-specific status register.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	bool sr_enabled = false;

	wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1716
1717 static int i915_emon_status(struct seq_file *m, void *unused)
1718 {
1719         struct drm_i915_private *i915 = node_to_i915(m->private);
1720         intel_wakeref_t wakeref;
1721
1722         if (!IS_GEN(i915, 5))
1723                 return -ENODEV;
1724
1725         with_intel_runtime_pm(i915, wakeref) {
1726                 unsigned long temp, chipset, gfx;
1727
1728                 temp = i915_mch_val(i915);
1729                 chipset = i915_chipset_val(i915);
1730                 gfx = i915_gfx_val(i915);
1731
1732                 seq_printf(m, "GMCH temp: %ld\n", temp);
1733                 seq_printf(m, "Chipset power: %ld\n", chipset);
1734                 seq_printf(m, "GFX power: %ld\n", gfx);
1735                 seq_printf(m, "Total power: %ld\n", chipset + gfx);
1736         }
1737
1738         return 0;
1739 }
1740
/*
 * Print the pcode-provided mapping from each supported GPU frequency to
 * the effective CPU and ring frequencies (LLC-sharing platforms only).
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	intel_wakeref_t wakeref;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	wakeref = intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is input (GT freq) and output (packed reply). */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		/*
		 * Low byte of the reply feeds the CPU column, the next
		 * byte the ring column; both in 100 MHz units.
		 */
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv, wakeref);
	return ret;
}
1789
1790 static int i915_opregion(struct seq_file *m, void *unused)
1791 {
1792         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1793         struct drm_device *dev = &dev_priv->drm;
1794         struct intel_opregion *opregion = &dev_priv->opregion;
1795         int ret;
1796
1797         ret = mutex_lock_interruptible(&dev->struct_mutex);
1798         if (ret)
1799                 goto out;
1800
1801         if (opregion->header)
1802                 seq_write(m, opregion->header, OPREGION_SIZE);
1803
1804         mutex_unlock(&dev->struct_mutex);
1805
1806 out:
1807         return 0;
1808 }
1809
1810 static int i915_vbt(struct seq_file *m, void *unused)
1811 {
1812         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1813
1814         if (opregion->vbt)
1815                 seq_write(m, opregion->vbt, opregion->vbt_size);
1816
1817         return 0;
1818 }
1819
/*
 * List all framebuffers: first the fbdev/fbcon one (when fbdev
 * emulation is built in), then every user-created framebuffer.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
		/* Skip the fbdev fb; it was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1869
1870 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1871 {
1872         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1873                    ring->space, ring->head, ring->tail, ring->emit);
1874 }
1875
/*
 * Walk every GEM context and print its HW id, owning task (or
 * deleted/kernel), remap flag and, per active engine, its state object
 * and ring.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_gem_context *ctx;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		struct intel_context *ce;

		seq_puts(m, "HW context ");
		/* Only contexts with an assigned hw_id are on hw_id_link. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv is an ERR_PTR once the owner closed it. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R' = slice remapping needed, 'r' = not. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		list_for_each_entry(ce, &ctx->active_engines, active_link) {
			seq_printf(m, "%s: ", ce->engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1928
1929 static const char *swizzle_string(unsigned swizzle)
1930 {
1931         switch (swizzle) {
1932         case I915_BIT_6_SWIZZLE_NONE:
1933                 return "none";
1934         case I915_BIT_6_SWIZZLE_9:
1935                 return "bit9";
1936         case I915_BIT_6_SWIZZLE_9_10:
1937                 return "bit9/bit10";
1938         case I915_BIT_6_SWIZZLE_9_11:
1939                 return "bit9/bit11";
1940         case I915_BIT_6_SWIZZLE_9_10_11:
1941                 return "bit9/bit10/bit11";
1942         case I915_BIT_6_SWIZZLE_9_17:
1943                 return "bit9/bit17";
1944         case I915_BIT_6_SWIZZLE_9_10_17:
1945                 return "bit9/bit10/bit17";
1946         case I915_BIT_6_SWIZZLE_UNKNOWN:
1947                 return "unknown";
1948         }
1949
1950         return "bug";
1951 }
1952
/*
 * Report the detected bit-6 swizzle modes for X/Y tiling plus the raw
 * memory-channel/arbitration registers they were derived from.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Raw register dump differs per generation. */
	if (IS_GEN_RANGE(dev_priv, 3, 4)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv, wakeref);

	return 0;
}
2000
2001 static const char *rps_power_to_str(unsigned int power)
2002 {
2003         static const char * const strings[] = {
2004                 [LOW_POWER] = "low power",
2005                 [BETWEEN] = "mixed",
2006                 [HIGH_POWER] = "high power",
2007         };
2008
2009         if (power >= ARRAY_SIZE(strings) || !strings[power])
2010                 return "unknown";
2011
2012         return strings[power];
2013 }
2014
/*
 * Dump RPS (render power/frequency scaling) state: requested vs actual
 * frequency, the soft/hard limits, boost statistics and - when the GPU
 * is busy - the autotuning up/down counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	intel_wakeref_t wakeref;

	/* Only read the real hardware frequency if the device is awake. */
	with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw reads bracketed by an explicit forcewake grab. */
		intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2083
2084 static int i915_llc(struct seq_file *m, void *data)
2085 {
2086         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2087         const bool edram = INTEL_GEN(dev_priv) > 8;
2088
2089         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2090         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2091                    intel_uncore_edram_size(dev_priv)/1024/1024);
2092
2093         return 0;
2094 }
2095
2096 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2097 {
2098         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2099         intel_wakeref_t wakeref;
2100         struct drm_printer p;
2101
2102         if (!HAS_HUC(dev_priv))
2103                 return -ENODEV;
2104
2105         p = drm_seq_file_printer(m);
2106         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2107
2108         with_intel_runtime_pm(dev_priv, wakeref)
2109                 seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2110
2111         return 0;
2112 }
2113
/*
 * Dump GuC firmware info and, with the device awake, the decoded
 * GUC_STATUS register plus the 16 soft-scratch registers.
 */
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	intel_wakeref_t wakeref;
	struct drm_printer p;

	if (!HAS_GUC(dev_priv))
		return -ENODEV;

	p = drm_seq_file_printer(m);
	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

	with_intel_runtime_pm(dev_priv, wakeref) {
		u32 tmp = I915_READ(GUC_STATUS);
		u32 i;

		seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
		seq_printf(m, "\tBootrom status = 0x%x\n",
			   (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
		seq_printf(m, "\tuKernel status = 0x%x\n",
			   (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
		seq_printf(m, "\tMIA Core status = 0x%x\n",
			   (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
		seq_puts(m, "\nScratch registers:\n");
		for (i = 0; i < 16; i++) {
			seq_printf(m, "\t%2d: \t0x%x\n",
				   i, I915_READ(SOFT_SCRATCH(i)));
		}
	}

	return 0;
}
2146
2147 static const char *
2148 stringify_guc_log_type(enum guc_log_buffer_type type)
2149 {
2150         switch (type) {
2151         case GUC_ISR_LOG_BUFFER:
2152                 return "ISR";
2153         case GUC_DPC_LOG_BUFFER:
2154                 return "DPC";
2155         case GUC_CRASH_DUMP_LOG_BUFFER:
2156                 return "CRASH";
2157         default:
2158                 MISSING_CASE(type);
2159         }
2160
2161         return "";
2162 }
2163
2164 static void i915_guc_log_info(struct seq_file *m,
2165                               struct drm_i915_private *dev_priv)
2166 {
2167         struct intel_guc_log *log = &dev_priv->guc.log;
2168         enum guc_log_buffer_type type;
2169
2170         if (!intel_guc_log_relay_enabled(log)) {
2171                 seq_puts(m, "GuC log relay disabled\n");
2172                 return;
2173         }
2174
2175         seq_puts(m, "GuC logging stats:\n");
2176
2177         seq_printf(m, "\tRelay full count: %u\n",
2178                    log->relay.full_count);
2179
2180         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2181                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2182                            stringify_guc_log_type(type),
2183                            log->stats[type].flush,
2184                            log->stats[type].sampled_overflow);
2185         }
2186 }
2187
2188 static void i915_guc_client_info(struct seq_file *m,
2189                                  struct drm_i915_private *dev_priv,
2190                                  struct intel_guc_client *client)
2191 {
2192         struct intel_engine_cs *engine;
2193         enum intel_engine_id id;
2194         u64 tot = 0;
2195
2196         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2197                 client->priority, client->stage_id, client->proc_desc_offset);
2198         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2199                 client->doorbell_id, client->doorbell_offset);
2200
2201         for_each_engine(engine, dev_priv, id) {
2202                 u64 submissions = client->submissions[id];
2203                 tot += submissions;
2204                 seq_printf(m, "\tSubmissions: %llu %s\n",
2205                                 submissions, engine->name);
2206         }
2207         seq_printf(m, "\tTotal: %llu\n", tot);
2208 }
2209
2210 static int i915_guc_info(struct seq_file *m, void *data)
2211 {
2212         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2213         const struct intel_guc *guc = &dev_priv->guc;
2214
2215         if (!USES_GUC(dev_priv))
2216                 return -ENODEV;
2217
2218         i915_guc_log_info(m, dev_priv);
2219
2220         if (!USES_GUC_SUBMISSION(dev_priv))
2221                 return 0;
2222
2223         GEM_BUG_ON(!guc->execbuf_client);
2224
2225         seq_printf(m, "\nDoorbell map:\n");
2226         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2227         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2228
2229         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2230         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2231         if (guc->preempt_client) {
2232                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2233                            guc->preempt_client);
2234                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2235         }
2236
2237         /* Add more as required ... */
2238
2239         return 0;
2240 }
2241
/*
 * Dump the active GuC stage descriptors.
 *
 * Walks the entire stage descriptor pool, printing each descriptor that
 * has the ACTIVE attribute set, followed by the per-engine execlist
 * context state for the engines used by the execbuf client.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_guc *guc = &dev_priv->guc;
        struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
        struct intel_guc_client *client = guc->execbuf_client;
        unsigned int tmp;
        int index;

        if (!USES_GUC_SUBMISSION(dev_priv))
                return -ENODEV;

        for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
                struct intel_engine_cs *engine;

                /* Skip descriptors that are not currently in use. */
                if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
                        continue;

                seq_printf(m, "GuC stage descriptor %u:\n", index);
                seq_printf(m, "\tIndex: %u\n", desc->stage_id);
                seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
                seq_printf(m, "\tPriority: %d\n", desc->priority);
                seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
                seq_printf(m, "\tEngines used: 0x%x\n",
                           desc->engines_used);
                seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
                           desc->db_trigger_phy,
                           desc->db_trigger_cpu,
                           desc->db_trigger_uk);
                seq_printf(m, "\tProcess descriptor: 0x%x\n",
                           desc->process_desc);
                seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
                           desc->wq_addr, desc->wq_size);
                seq_putc(m, '\n');

                /* Per-engine LRC state, indexed by the GuC's engine id. */
                for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
                        u32 guc_engine_id = engine->guc_id;
                        struct guc_execlist_context *lrc =
                                                &desc->lrc[guc_engine_id];

                        seq_printf(m, "\t%s LRC:\n", engine->name);
                        seq_printf(m, "\t\tContext desc: 0x%x\n",
                                   lrc->context_desc);
                        seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
                        seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
                        seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
                        seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
                        seq_putc(m, '\n');
                }
        }

        return 0;
}
2295
2296 static int i915_guc_log_dump(struct seq_file *m, void *data)
2297 {
2298         struct drm_info_node *node = m->private;
2299         struct drm_i915_private *dev_priv = node_to_i915(node);
2300         bool dump_load_err = !!node->info_ent->data;
2301         struct drm_i915_gem_object *obj = NULL;
2302         u32 *log;
2303         int i = 0;
2304
2305         if (!HAS_GUC(dev_priv))
2306                 return -ENODEV;
2307
2308         if (dump_load_err)
2309                 obj = dev_priv->guc.load_err_log;
2310         else if (dev_priv->guc.log.vma)
2311                 obj = dev_priv->guc.log.vma->obj;
2312
2313         if (!obj)
2314                 return 0;
2315
2316         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2317         if (IS_ERR(log)) {
2318                 DRM_DEBUG("Failed to pin object\n");
2319                 seq_puts(m, "(log data unaccessible)\n");
2320                 return PTR_ERR(log);
2321         }
2322
2323         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2324                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2325                            *(log + i), *(log + i + 1),
2326                            *(log + i + 2), *(log + i + 3));
2327
2328         seq_putc(m, '\n');
2329
2330         i915_gem_object_unpin_map(obj);
2331
2332         return 0;
2333 }
2334
2335 static int i915_guc_log_level_get(void *data, u64 *val)
2336 {
2337         struct drm_i915_private *dev_priv = data;
2338
2339         if (!USES_GUC(dev_priv))
2340                 return -ENODEV;
2341
2342         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2343
2344         return 0;
2345 }
2346
2347 static int i915_guc_log_level_set(void *data, u64 val)
2348 {
2349         struct drm_i915_private *dev_priv = data;
2350
2351         if (!USES_GUC(dev_priv))
2352                 return -ENODEV;
2353
2354         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2355 }
2356
2357 DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
2358                         i915_guc_log_level_get, i915_guc_log_level_set,
2359                         "%lld\n");
2360
2361 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2362 {
2363         struct drm_i915_private *dev_priv = inode->i_private;
2364
2365         if (!USES_GUC(dev_priv))
2366                 return -ENODEV;
2367
2368         file->private_data = &dev_priv->guc.log;
2369
2370         return intel_guc_log_relay_open(&dev_priv->guc.log);
2371 }
2372
2373 static ssize_t
2374 i915_guc_log_relay_write(struct file *filp,
2375                          const char __user *ubuf,
2376                          size_t cnt,
2377                          loff_t *ppos)
2378 {
2379         struct intel_guc_log *log = filp->private_data;
2380
2381         intel_guc_log_relay_flush(log);
2382
2383         return cnt;
2384 }
2385
2386 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2387 {
2388         struct drm_i915_private *dev_priv = inode->i_private;
2389
2390         intel_guc_log_relay_close(&dev_priv->guc.log);
2391
2392         return 0;
2393 }
2394
/* debugfs file hooked up to the GuC relay log: open / flush-on-write / close. */
static const struct file_operations i915_guc_log_relay_fops = {
        .owner = THIS_MODULE,
        .open = i915_guc_log_relay_open,
        .write = i915_guc_log_relay_write,
        .release = i915_guc_log_relay_release,
};
2401
2402 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2403 {
2404         u8 val;
2405         static const char * const sink_status[] = {
2406                 "inactive",
2407                 "transition to active, capture and display",
2408                 "active, display from RFB",
2409                 "active, capture and display on sink device timings",
2410                 "transition to inactive, capture and display, timing re-sync",
2411                 "reserved",
2412                 "reserved",
2413                 "sink internal error",
2414         };
2415         struct drm_connector *connector = m->private;
2416         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2417         struct intel_dp *intel_dp =
2418                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2419         int ret;
2420
2421         if (!CAN_PSR(dev_priv)) {
2422                 seq_puts(m, "PSR Unsupported\n");
2423                 return -ENODEV;
2424         }
2425
2426         if (connector->status != connector_status_connected)
2427                 return -ENODEV;
2428
2429         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2430
2431         if (ret == 1) {
2432                 const char *str = "unknown";
2433
2434                 val &= DP_PSR_SINK_STATE_MASK;
2435                 if (val < ARRAY_SIZE(sink_status))
2436                         str = sink_status[val];
2437                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2438         } else {
2439                 return ret;
2440         }
2441
2442         return 0;
2443 }
2444 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2445
2446 static void
2447 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2448 {
2449         u32 val, status_val;
2450         const char *status = "unknown";
2451
2452         if (dev_priv->psr.psr2_enabled) {
2453                 static const char * const live_status[] = {
2454                         "IDLE",
2455                         "CAPTURE",
2456                         "CAPTURE_FS",
2457                         "SLEEP",
2458                         "BUFON_FW",
2459                         "ML_UP",
2460                         "SU_STANDBY",
2461                         "FAST_SLEEP",
2462                         "DEEP_SLEEP",
2463                         "BUF_ON",
2464                         "TG_ON"
2465                 };
2466                 val = I915_READ(EDP_PSR2_STATUS);
2467                 status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
2468                               EDP_PSR2_STATUS_STATE_SHIFT;
2469                 if (status_val < ARRAY_SIZE(live_status))
2470                         status = live_status[status_val];
2471         } else {
2472                 static const char * const live_status[] = {
2473                         "IDLE",
2474                         "SRDONACK",
2475                         "SRDENT",
2476                         "BUFOFF",
2477                         "BUFON",
2478                         "AUXACK",
2479                         "SRDOFFACK",
2480                         "SRDENT_ON",
2481                 };
2482                 val = I915_READ(EDP_PSR_STATUS);
2483                 status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
2484                               EDP_PSR_STATUS_STATE_SHIFT;
2485                 if (status_val < ARRAY_SIZE(live_status))
2486                         status = live_status[status_val];
2487         }
2488
2489         seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
2490 }
2491
/*
 * i915_edp_psr_status - report sink and source PSR state for the eDP panel.
 *
 * Prints sink capability, the enabled PSR mode (PSR1/PSR2), source
 * control/status registers, busy frontbuffer bits, and (for PSR2) the
 * selective-update block counts for recent frames.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct i915_psr *psr = &dev_priv->psr;
        intel_wakeref_t wakeref;
        const char *status;
        bool enabled;
        u32 val;

        if (!HAS_PSR(dev_priv))
                return -ENODEV;

        seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
        if (psr->dp)
                seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
        seq_puts(m, "\n");

        if (!psr->sink_support)
                return 0;

        /* Keep the device awake and the PSR state stable while reading. */
        wakeref = intel_runtime_pm_get(dev_priv);
        mutex_lock(&psr->lock);

        if (psr->enabled)
                status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
        else
                status = "disabled";
        seq_printf(m, "PSR mode: %s\n", status);

        if (!psr->enabled)
                goto unlock;

        /* Pick the control register matching the active PSR mode. */
        if (psr->psr2_enabled) {
                val = I915_READ(EDP_PSR2_CTL);
                enabled = val & EDP_PSR2_ENABLE;
        } else {
                val = I915_READ(EDP_PSR_CTL);
                enabled = val & EDP_PSR_ENABLE;
        }
        seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
                   enableddisabled(enabled), val);
        psr_source_status(dev_priv, m);
        seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
                   psr->busy_frontbuffer_bits);

        /*
         * SKL+ Perf counter is reset to 0 everytime DC state is entered
         */
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK;
                seq_printf(m, "Performance counter: %u\n", val);
        }

        if (psr->debug & I915_PSR_DEBUG_IRQ) {
                seq_printf(m, "Last attempted entry at: %lld\n",
                           psr->last_entry_attempt);
                seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
        }

        if (psr->psr2_enabled) {
                u32 su_frames_val[3];
                int frame;

                /*
                 * Reading all 3 registers before hand to minimize crossing a
                 * frame boundary between register reads
                 */
                for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3)
                        su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame));

                seq_puts(m, "Frame:\tPSR2 SU blocks:\n");

                /* Each status register packs the SU count for three frames. */
                for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
                        u32 su_blocks;

                        su_blocks = su_frames_val[frame / 3] &
                                    PSR2_SU_STATUS_MASK(frame);
                        su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
                        seq_printf(m, "%d\t%d\n", frame, su_blocks);
                }
        }

unlock:
        mutex_unlock(&psr->lock);
        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}
2580
2581 static int
2582 i915_edp_psr_debug_set(void *data, u64 val)
2583 {
2584         struct drm_i915_private *dev_priv = data;
2585         intel_wakeref_t wakeref;
2586         int ret;
2587
2588         if (!CAN_PSR(dev_priv))
2589                 return -ENODEV;
2590
2591         DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
2592
2593         wakeref = intel_runtime_pm_get(dev_priv);
2594
2595         ret = intel_psr_debug_set(dev_priv, val);
2596
2597         intel_runtime_pm_put(dev_priv, wakeref);
2598
2599         return ret;
2600 }
2601
2602 static int
2603 i915_edp_psr_debug_get(void *data, u64 *val)
2604 {
2605         struct drm_i915_private *dev_priv = data;
2606
2607         if (!CAN_PSR(dev_priv))
2608                 return -ENODEV;
2609
2610         *val = READ_ONCE(dev_priv->psr.debug);
2611         return 0;
2612 }
2613
2614 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
2615                         i915_edp_psr_debug_get, i915_edp_psr_debug_set,
2616                         "%llu\n");
2617
/*
 * Report energy consumption in microjoules, computed from the raw
 * MCH_SECP_NRG_STTS counter scaled by the RAPL energy-status units.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        unsigned long long power;
        intel_wakeref_t wakeref;
        u32 units;

        if (INTEL_GEN(dev_priv) < 6)
                return -ENODEV;

        if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
                return -ENODEV;

        /* Energy status units live in bits 12:8 of MSR_RAPL_POWER_UNIT. */
        units = (power & 0x1f00) >> 8;
        with_intel_runtime_pm(dev_priv, wakeref)
                power = I915_READ(MCH_SECP_NRG_STTS);

        power = (1000000 * power) >> units; /* convert to uJ */
        seq_printf(m, "%llu", power);

        return 0;
}
2640
2641 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2642 {
2643         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2644         struct pci_dev *pdev = dev_priv->drm.pdev;
2645
2646         if (!HAS_RUNTIME_PM(dev_priv))
2647                 seq_puts(m, "Runtime power management not supported\n");
2648
2649         seq_printf(m, "Runtime power status: %s\n",
2650                    enableddisabled(!dev_priv->power_domains.wakeref));
2651
2652         seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2653         seq_printf(m, "IRQs disabled: %s\n",
2654                    yesno(!intel_irqs_enabled(dev_priv)));
2655 #ifdef CONFIG_PM
2656         seq_printf(m, "Usage count: %d\n",
2657                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2658 #else
2659         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2660 #endif
2661         seq_printf(m, "PCI device power state: %s [%d]\n",
2662                    pci_power_name(pdev->current_state),
2663                    pdev->current_state);
2664
2665         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
2666                 struct drm_printer p = drm_seq_file_printer(m);
2667
2668                 print_intel_runtime_pm_wakeref(dev_priv, &p);
2669         }
2670
2671         return 0;
2672 }
2673
2674 static int i915_power_domain_info(struct seq_file *m, void *unused)
2675 {
2676         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2677         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2678         int i;
2679
2680         mutex_lock(&power_domains->lock);
2681
2682         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2683         for (i = 0; i < power_domains->power_well_count; i++) {
2684                 struct i915_power_well *power_well;
2685                 enum intel_display_power_domain power_domain;
2686
2687                 power_well = &power_domains->power_wells[i];
2688                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2689                            power_well->count);
2690
2691                 for_each_power_domain(power_domain, power_well->desc->domains)
2692                         seq_printf(m, "  %-23s %d\n",
2693                                  intel_display_power_domain_str(power_domain),
2694                                  power_domains->domain_use_count[power_domain]);
2695         }
2696
2697         mutex_unlock(&power_domains->lock);
2698
2699         return 0;
2700 }
2701
/*
 * i915_dmc_info - report CSR/DMC firmware status.
 *
 * Shows whether the CSR firmware payload is loaded, its version, the
 * DC-state transition counters and a few raw CSR registers.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        intel_wakeref_t wakeref;
        struct intel_csr *csr;

        if (!HAS_CSR(dev_priv))
                return -ENODEV;

        csr = &dev_priv->csr;

        wakeref = intel_runtime_pm_get(dev_priv);

        seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
        seq_printf(m, "path: %s\n", csr->fw_path);

        /* Without a payload there is no version or counters to report. */
        if (!csr->dmc_payload)
                goto out;

        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));

        /* The counter registers below are only validated up to gen11. */
        if (WARN_ON(INTEL_GEN(dev_priv) > 11))
                goto out;

        seq_printf(m, "DC3 -> DC5 count: %d\n",
                   I915_READ(IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
                                                    SKL_CSR_DC3_DC5_COUNT));
        if (!IS_GEN9_LP(dev_priv))
                seq_printf(m, "DC5 -> DC6 count: %d\n",
                           I915_READ(SKL_CSR_DC5_DC6_COUNT));

out:
        seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
        seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
        seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

        intel_runtime_pm_put(dev_priv, wakeref);

        return 0;
}
2743
2744 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2745                                  struct drm_display_mode *mode)
2746 {
2747         int i;
2748
2749         for (i = 0; i < tabs; i++)
2750                 seq_putc(m, '\t');
2751
2752         seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2753 }
2754
2755 static void intel_encoder_info(struct seq_file *m,
2756                                struct intel_crtc *intel_crtc,
2757                                struct intel_encoder *intel_encoder)
2758 {
2759         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2760         struct drm_device *dev = &dev_priv->drm;
2761         struct drm_crtc *crtc = &intel_crtc->base;
2762         struct intel_connector *intel_connector;
2763         struct drm_encoder *encoder;
2764
2765         encoder = &intel_encoder->base;
2766         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2767                    encoder->base.id, encoder->name);
2768         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2769                 struct drm_connector *connector = &intel_connector->base;
2770                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2771                            connector->base.id,
2772                            connector->name,
2773                            drm_get_connector_status_name(connector->status));
2774                 if (connector->status == connector_status_connected) {
2775                         struct drm_display_mode *mode = &crtc->mode;
2776                         seq_printf(m, ", mode:\n");
2777                         intel_seq_print_mode(m, 2, mode);
2778                 } else {
2779                         seq_putc(m, '\n');
2780                 }
2781         }
2782 }
2783
2784 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2785 {
2786         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2787         struct drm_device *dev = &dev_priv->drm;
2788         struct drm_crtc *crtc = &intel_crtc->base;
2789         struct intel_encoder *intel_encoder;
2790         struct drm_plane_state *plane_state = crtc->primary->state;
2791         struct drm_framebuffer *fb = plane_state->fb;
2792
2793         if (fb)
2794                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2795                            fb->base.id, plane_state->src_x >> 16,
2796                            plane_state->src_y >> 16, fb->width, fb->height);
2797         else
2798                 seq_puts(m, "\tprimary plane disabled\n");
2799         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
2800                 intel_encoder_info(m, intel_crtc, intel_encoder);
2801 }
2802
2803 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
2804 {
2805         struct drm_display_mode *mode = panel->fixed_mode;
2806
2807         seq_printf(m, "\tfixed mode:\n");
2808         intel_seq_print_mode(m, 2, mode);
2809 }
2810
2811 static void intel_dp_info(struct seq_file *m,
2812                           struct intel_connector *intel_connector)
2813 {
2814         struct intel_encoder *intel_encoder = intel_connector->encoder;
2815         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2816
2817         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2818         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2819         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2820                 intel_panel_info(m, &intel_connector->panel);
2821
2822         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
2823                                 &intel_dp->aux);
2824 }
2825
2826 static void intel_dp_mst_info(struct seq_file *m,
2827                           struct intel_connector *intel_connector)
2828 {
2829         struct intel_encoder *intel_encoder = intel_connector->encoder;
2830         struct intel_dp_mst_encoder *intel_mst =
2831                 enc_to_mst(&intel_encoder->base);
2832         struct intel_digital_port *intel_dig_port = intel_mst->primary;
2833         struct intel_dp *intel_dp = &intel_dig_port->dp;
2834         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
2835                                         intel_connector->port);
2836
2837         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
2838 }
2839
2840 static void intel_hdmi_info(struct seq_file *m,
2841                             struct intel_connector *intel_connector)
2842 {
2843         struct intel_encoder *intel_encoder = intel_connector->encoder;
2844         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
2845
2846         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
2847 }
2848
/* LVDS connectors: the only interesting state is the fixed panel mode. */
static void intel_lvds_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
{
        intel_panel_info(m, &intel_connector->panel);
}
2854
2855 static void intel_connector_info(struct seq_file *m,
2856                                  struct drm_connector *connector)
2857 {
2858         struct intel_connector *intel_connector = to_intel_connector(connector);
2859         struct intel_encoder *intel_encoder = intel_connector->encoder;
2860         struct drm_display_mode *mode;
2861
2862         seq_printf(m, "connector %d: type %s, status: %s\n",
2863                    connector->base.id, connector->name,
2864                    drm_get_connector_status_name(connector->status));
2865
2866         if (connector->status == connector_status_disconnected)
2867                 return;
2868
2869         seq_printf(m, "\tname: %s\n", connector->display_info.name);
2870         seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
2871                    connector->display_info.width_mm,
2872                    connector->display_info.height_mm);
2873         seq_printf(m, "\tsubpixel order: %s\n",
2874                    drm_get_subpixel_order_name(connector->display_info.subpixel_order));
2875         seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
2876
2877         if (!intel_encoder)
2878                 return;
2879
2880         switch (connector->connector_type) {
2881         case DRM_MODE_CONNECTOR_DisplayPort:
2882         case DRM_MODE_CONNECTOR_eDP:
2883                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
2884                         intel_dp_mst_info(m, intel_connector);
2885                 else
2886                         intel_dp_info(m, intel_connector);
2887                 break;
2888         case DRM_MODE_CONNECTOR_LVDS:
2889                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2890                         intel_lvds_info(m, intel_connector);
2891                 break;
2892         case DRM_MODE_CONNECTOR_HDMIA:
2893                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
2894                     intel_encoder->type == INTEL_OUTPUT_DDI)
2895                         intel_hdmi_info(m, intel_connector);
2896                 break;
2897         default:
2898                 break;
2899         }
2900
2901         seq_printf(m, "\tmodes:\n");
2902         list_for_each_entry(mode, &connector->modes, head)
2903                 intel_seq_print_mode(m, 2, mode);
2904 }
2905
/* Map a drm_plane_type to the short tag used in the plane dump. */
static const char *plane_type(enum drm_plane_type type)
{
        switch (type) {
        case DRM_PLANE_TYPE_OVERLAY:
                return "OVL";
        case DRM_PLANE_TYPE_PRIMARY:
                return "PRI";
        case DRM_PLANE_TYPE_CURSOR:
                return "CUR";
        /*
         * Deliberately omitting default: to generate compiler warnings
         * when a new drm_plane_type gets added.
         */
        }

        return "unknown";
}
2923
/* Format a rotation bitmask into a human readable string in @buf. */
static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
        /*
         * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, bufsize,
                 "%s%s%s%s%s%s(0x%08x)",
                 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
                 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
                 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
                 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
                 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
                 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
}
2940
/*
 * Dump every plane attached to the given CRTC: type, position and size
 * (src coordinates are 16.16 fixed point), pixel format and rotation.
 */
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct drm_device *dev = &dev_priv->drm;
        struct intel_plane *intel_plane;

        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                struct drm_plane_state *state;
                struct drm_plane *plane = &intel_plane->base;
                struct drm_format_name_buf format_name;
                char rot_str[48];

                if (!plane->state) {
                        seq_puts(m, "plane->state is NULL!\n");
                        continue;
                }

                state = plane->state;

                if (state->fb) {
                        drm_get_format_name(state->fb->format->format,
                                            &format_name);
                } else {
                        sprintf(format_name.str, "N/A");
                }

                plane_rotation(rot_str, sizeof(rot_str), state->rotation);

                /*
                 * The 16.16 fixed-point fractions are shown as decimal:
                 * frac * 15625 >> 10 == frac * 10^6 / 2^16.
                 */
                seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
                           plane->base.id,
                           plane_type(intel_plane->base.type),
                           state->crtc_x, state->crtc_y,
                           state->crtc_w, state->crtc_h,
                           (state->src_x >> 16),
                           ((state->src_x & 0xffff) * 15625) >> 10,
                           (state->src_y >> 16),
                           ((state->src_y & 0xffff) * 15625) >> 10,
                           (state->src_w >> 16),
                           ((state->src_w & 0xffff) * 15625) >> 10,
                           (state->src_h >> 16),
                           ((state->src_h & 0xffff) * 15625) >> 10,
                           format_name.str,
                           rot_str);
        }
}
2986
2987 static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2988 {
2989         struct intel_crtc_state *pipe_config;
2990         int num_scalers = intel_crtc->num_scalers;
2991         int i;
2992
2993         pipe_config = to_intel_crtc_state(intel_crtc->base.state);
2994
2995         /* Not all platformas have a scaler */
2996         if (num_scalers) {
2997                 seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
2998                            num_scalers,
2999                            pipe_config->scaler_state.scaler_users,
3000                            pipe_config->scaler_state.scaler_id);
3001
3002                 for (i = 0; i < num_scalers; i++) {
3003                         struct intel_scaler *sc =
3004                                         &pipe_config->scaler_state.scalers[i];
3005
3006                         seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
3007                                    i, yesno(sc->in_use), sc->mode);
3008                 }
3009                 seq_puts(m, "\n");
3010         } else {
3011                 seq_puts(m, "\tNo scalers available on this platform\n");
3012         }
3013 }
3014
3015 static int i915_display_info(struct seq_file *m, void *unused)
3016 {
3017         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3018         struct drm_device *dev = &dev_priv->drm;
3019         struct intel_crtc *crtc;
3020         struct drm_connector *connector;
3021         struct drm_connector_list_iter conn_iter;
3022         intel_wakeref_t wakeref;
3023
3024         wakeref = intel_runtime_pm_get(dev_priv);
3025
3026         seq_printf(m, "CRTC info\n");
3027         seq_printf(m, "---------\n");
3028         for_each_intel_crtc(dev, crtc) {
3029                 struct intel_crtc_state *pipe_config;
3030
3031                 drm_modeset_lock(&crtc->base.mutex, NULL);
3032                 pipe_config = to_intel_crtc_state(crtc->base.state);
3033
3034                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3035                            crtc->base.base.id, pipe_name(crtc->pipe),
3036                            yesno(pipe_config->base.active),
3037                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3038                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3039
3040                 if (pipe_config->base.active) {
3041                         struct intel_plane *cursor =
3042                                 to_intel_plane(crtc->base.cursor);
3043
3044                         intel_crtc_info(m, crtc);
3045
3046                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3047                                    yesno(cursor->base.state->visible),
3048                                    cursor->base.state->crtc_x,
3049                                    cursor->base.state->crtc_y,
3050                                    cursor->base.state->crtc_w,
3051                                    cursor->base.state->crtc_h,
3052                                    cursor->cursor.base);
3053                         intel_scaler_info(m, crtc);
3054                         intel_plane_info(m, crtc);
3055                 }
3056
3057                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3058                            yesno(!crtc->cpu_fifo_underrun_disabled),
3059                            yesno(!crtc->pch_fifo_underrun_disabled));
3060                 drm_modeset_unlock(&crtc->base.mutex);
3061         }
3062
3063         seq_printf(m, "\n");
3064         seq_printf(m, "Connector info\n");
3065         seq_printf(m, "--------------\n");
3066         mutex_lock(&dev->mode_config.mutex);
3067         drm_connector_list_iter_begin(dev, &conn_iter);
3068         drm_for_each_connector_iter(connector, &conn_iter)
3069                 intel_connector_info(m, connector);
3070         drm_connector_list_iter_end(&conn_iter);
3071         mutex_unlock(&dev->mode_config.mutex);
3072
3073         intel_runtime_pm_put(dev_priv, wakeref);
3074
3075         return 0;
3076 }
3077
3078 static int i915_engine_info(struct seq_file *m, void *unused)
3079 {
3080         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3081         struct intel_engine_cs *engine;
3082         intel_wakeref_t wakeref;
3083         enum intel_engine_id id;
3084         struct drm_printer p;
3085
3086         wakeref = intel_runtime_pm_get(dev_priv);
3087
3088         seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
3089         seq_printf(m, "Global active requests: %d\n",
3090                    dev_priv->gt.active_requests);
3091         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3092                    RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
3093
3094         p = drm_seq_file_printer(m);
3095         for_each_engine(engine, dev_priv, id)
3096                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3097
3098         intel_runtime_pm_put(dev_priv, wakeref);
3099
3100         return 0;
3101 }
3102
3103 static int i915_rcs_topology(struct seq_file *m, void *unused)
3104 {
3105         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3106         struct drm_printer p = drm_seq_file_printer(m);
3107
3108         intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
3109
3110         return 0;
3111 }
3112
3113 static int i915_shrinker_info(struct seq_file *m, void *unused)
3114 {
3115         struct drm_i915_private *i915 = node_to_i915(m->private);
3116
3117         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3118         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3119
3120         return 0;
3121 }
3122
3123 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3124 {
3125         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3126         struct drm_device *dev = &dev_priv->drm;
3127         int i;
3128
3129         drm_modeset_lock_all(dev);
3130         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3131                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3132
3133                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3134                            pll->info->id);
3135                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3136                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3137                 seq_printf(m, " tracked hardware state:\n");
3138                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3139                 seq_printf(m, " dpll_md: 0x%08x\n",
3140                            pll->state.hw_state.dpll_md);
3141                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3142                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3143                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3144                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3145                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3146                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3147                            pll->state.hw_state.mg_refclkin_ctl);
3148                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3149                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3150                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3151                            pll->state.hw_state.mg_clktop2_hsclkctl);
3152                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3153                            pll->state.hw_state.mg_pll_div0);
3154                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3155                            pll->state.hw_state.mg_pll_div1);
3156                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3157                            pll->state.hw_state.mg_pll_lf);
3158                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3159                            pll->state.hw_state.mg_pll_frac_lock);
3160                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3161                            pll->state.hw_state.mg_pll_ssc);
3162                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3163                            pll->state.hw_state.mg_pll_bias);
3164                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3165                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3166         }
3167         drm_modeset_unlock_all(dev);
3168
3169         return 0;
3170 }
3171
3172 static int i915_wa_registers(struct seq_file *m, void *unused)
3173 {
3174         struct drm_i915_private *i915 = node_to_i915(m->private);
3175         const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
3176         struct i915_wa *wa;
3177         unsigned int i;
3178
3179         seq_printf(m, "Workarounds applied: %u\n", wal->count);
3180         for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
3181                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3182                            i915_mmio_reg_offset(wa->reg), wa->val, wa->mask);
3183
3184         return 0;
3185 }
3186
3187 static int i915_ipc_status_show(struct seq_file *m, void *data)
3188 {
3189         struct drm_i915_private *dev_priv = m->private;
3190
3191         seq_printf(m, "Isochronous Priority Control: %s\n",
3192                         yesno(dev_priv->ipc_enabled));
3193         return 0;
3194 }
3195
3196 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3197 {
3198         struct drm_i915_private *dev_priv = inode->i_private;
3199
3200         if (!HAS_IPC(dev_priv))
3201                 return -ENODEV;
3202
3203         return single_open(file, i915_ipc_status_show, dev_priv);
3204 }
3205
3206 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3207                                      size_t len, loff_t *offp)
3208 {
3209         struct seq_file *m = file->private_data;
3210         struct drm_i915_private *dev_priv = m->private;
3211         intel_wakeref_t wakeref;
3212         bool enable;
3213         int ret;
3214
3215         ret = kstrtobool_from_user(ubuf, len, &enable);
3216         if (ret < 0)
3217                 return ret;
3218
3219         with_intel_runtime_pm(dev_priv, wakeref) {
3220                 if (!dev_priv->ipc_enabled && enable)
3221                         DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3222                 dev_priv->wm.distrust_bios_wm = true;
3223                 dev_priv->ipc_enabled = enable;
3224                 intel_enable_ipc(dev_priv);
3225         }
3226
3227         return len;
3228 }
3229
/* debugfs: read reports IPC state, write enables/disables it */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3238
3239 static int i915_ddb_info(struct seq_file *m, void *unused)
3240 {
3241         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3242         struct drm_device *dev = &dev_priv->drm;
3243         struct skl_ddb_entry *entry;
3244         struct intel_crtc *crtc;
3245
3246         if (INTEL_GEN(dev_priv) < 9)
3247                 return -ENODEV;
3248
3249         drm_modeset_lock_all(dev);
3250
3251         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3252
3253         for_each_intel_crtc(&dev_priv->drm, crtc) {
3254                 struct intel_crtc_state *crtc_state =
3255                         to_intel_crtc_state(crtc->base.state);
3256                 enum pipe pipe = crtc->pipe;
3257                 enum plane_id plane_id;
3258
3259                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3260
3261                 for_each_plane_id_on_crtc(crtc, plane_id) {
3262                         entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
3263                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane_id + 1,
3264                                    entry->start, entry->end,
3265                                    skl_ddb_entry_size(entry));
3266                 }
3267
3268                 entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
3269                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3270                            entry->end, skl_ddb_entry_size(entry));
3271         }
3272
3273         drm_modeset_unlock_all(dev);
3274
3275         return 0;
3276 }
3277
/*
 * drrs_status_per_crtc - print DRRS (Dynamic Refresh Rate Switching)
 * state for one crtc: the connector(s) it drives, the VBT-reported DRRS
 * type, and — when the crtc state has DRRS — the current refresh-rate
 * state and vrefresh.
 *
 * drrs->mutex is held while drrs->dp and refresh_rate_type are read;
 * note the early returns below must all drop it.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently driven by this crtc */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* What the VBT advertises for this platform */
	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3352
3353 static int i915_drrs_status(struct seq_file *m, void *unused)
3354 {
3355         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3356         struct drm_device *dev = &dev_priv->drm;
3357         struct intel_crtc *intel_crtc;
3358         int active_crtc_cnt = 0;
3359
3360         drm_modeset_lock_all(dev);
3361         for_each_intel_crtc(dev, intel_crtc) {
3362                 if (intel_crtc->base.state->active) {
3363                         active_crtc_cnt++;
3364                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3365
3366                         drrs_status_per_crtc(m, dev, intel_crtc);
3367                 }
3368         }
3369         drm_modeset_unlock_all(dev);
3370
3371         if (!active_crtc_cnt)
3372                 seq_puts(m, "No active crtc found\n");
3373
3374         return 0;
3375 }
3376
3377 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3378 {
3379         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3380         struct drm_device *dev = &dev_priv->drm;
3381         struct intel_encoder *intel_encoder;
3382         struct intel_digital_port *intel_dig_port;
3383         struct drm_connector *connector;
3384         struct drm_connector_list_iter conn_iter;
3385
3386         drm_connector_list_iter_begin(dev, &conn_iter);
3387         drm_for_each_connector_iter(connector, &conn_iter) {
3388                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3389                         continue;
3390
3391                 intel_encoder = intel_attached_encoder(connector);
3392                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3393                         continue;
3394
3395                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3396                 if (!intel_dig_port->dp.can_mst)
3397                         continue;
3398
3399                 seq_printf(m, "MST Source Port %c\n",
3400                            port_name(intel_dig_port->base.port));
3401                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3402         }
3403         drm_connector_list_iter_end(&conn_iter);
3404
3405         return 0;
3406 }
3407
/*
 * Write handler for the displayport_test_active debugfs file: parse a
 * decimal value from userspace and, for every connected non-MST DP
 * connector, arm (value 1) or disarm DP compliance test mode.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* memdup_user_nul() guarantees NUL termination for kstrtoint() */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* Skip MST virtual connectors */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3466
3467 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3468 {
3469         struct drm_i915_private *dev_priv = m->private;
3470         struct drm_device *dev = &dev_priv->drm;
3471         struct drm_connector *connector;
3472         struct drm_connector_list_iter conn_iter;
3473         struct intel_dp *intel_dp;
3474
3475         drm_connector_list_iter_begin(dev, &conn_iter);
3476         drm_for_each_connector_iter(connector, &conn_iter) {
3477                 struct intel_encoder *encoder;
3478
3479                 if (connector->connector_type !=
3480                     DRM_MODE_CONNECTOR_DisplayPort)
3481                         continue;
3482
3483                 encoder = to_intel_encoder(connector->encoder);
3484                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3485                         continue;
3486
3487                 if (encoder && connector->status == connector_status_connected) {
3488                         intel_dp = enc_to_intel_dp(&encoder->base);
3489                         if (intel_dp->compliance.test_active)
3490                                 seq_puts(m, "1");
3491                         else
3492                                 seq_puts(m, "0");
3493                 } else
3494                         seq_puts(m, "0");
3495         }
3496         drm_connector_list_iter_end(&conn_iter);
3497
3498         return 0;
3499 }
3500
3501 static int i915_displayport_test_active_open(struct inode *inode,
3502                                              struct file *file)
3503 {
3504         return single_open(file, i915_displayport_test_active_show,
3505                            inode->i_private);
3506 }
3507
/* debugfs: read shows, write arms/disarms DP compliance test mode */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3516
3517 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3518 {
3519         struct drm_i915_private *dev_priv = m->private;
3520         struct drm_device *dev = &dev_priv->drm;
3521         struct drm_connector *connector;
3522         struct drm_connector_list_iter conn_iter;
3523         struct intel_dp *intel_dp;
3524
3525         drm_connector_list_iter_begin(dev, &conn_iter);
3526         drm_for_each_connector_iter(connector, &conn_iter) {
3527                 struct intel_encoder *encoder;
3528
3529                 if (connector->connector_type !=
3530                     DRM_MODE_CONNECTOR_DisplayPort)
3531                         continue;
3532
3533                 encoder = to_intel_encoder(connector->encoder);
3534                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3535                         continue;
3536
3537                 if (encoder && connector->status == connector_status_connected) {
3538                         intel_dp = enc_to_intel_dp(&encoder->base);
3539                         if (intel_dp->compliance.test_type ==
3540                             DP_TEST_LINK_EDID_READ)
3541                                 seq_printf(m, "%lx",
3542                                            intel_dp->compliance.test_data.edid);
3543                         else if (intel_dp->compliance.test_type ==
3544                                  DP_TEST_LINK_VIDEO_PATTERN) {
3545                                 seq_printf(m, "hdisplay: %d\n",
3546                                            intel_dp->compliance.test_data.hdisplay);
3547                                 seq_printf(m, "vdisplay: %d\n",
3548                                            intel_dp->compliance.test_data.vdisplay);
3549                                 seq_printf(m, "bpc: %u\n",
3550                                            intel_dp->compliance.test_data.bpc);
3551                         }
3552                 } else
3553                         seq_puts(m, "0");
3554         }
3555         drm_connector_list_iter_end(&conn_iter);
3556
3557         return 0;
3558 }
3559 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3560
3561 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3562 {
3563         struct drm_i915_private *dev_priv = m->private;
3564         struct drm_device *dev = &dev_priv->drm;
3565         struct drm_connector *connector;
3566         struct drm_connector_list_iter conn_iter;
3567         struct intel_dp *intel_dp;
3568
3569         drm_connector_list_iter_begin(dev, &conn_iter);
3570         drm_for_each_connector_iter(connector, &conn_iter) {
3571                 struct intel_encoder *encoder;
3572
3573                 if (connector->connector_type !=
3574                     DRM_MODE_CONNECTOR_DisplayPort)
3575                         continue;
3576
3577                 encoder = to_intel_encoder(connector->encoder);
3578                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3579                         continue;
3580
3581                 if (encoder && connector->status == connector_status_connected) {
3582                         intel_dp = enc_to_intel_dp(&encoder->base);
3583                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3584                 } else
3585                         seq_puts(m, "0");
3586         }
3587         drm_connector_list_iter_end(&conn_iter);
3588
3589         return 0;
3590 }
3591 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3592
/*
 * wm_latency_show - print one watermark latency table, one "WMn" line
 * per valid level, converting the raw value to usec for display.
 *
 * The per-platform level count here must stay in sync with the same
 * ladder in wm_latency_write().
 */
static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	int level;
	int num_levels;

	/* Number of valid entries in wm[] depends on the platform */
	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++) {
		unsigned int latency = wm[level];

		/*
		 * - WM1+ latency values in 0.5us units
		 * - latencies are in us on gen9/vlv/chv
		 */
		if (INTEL_GEN(dev_priv) >= 9 ||
		    IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv) ||
		    IS_G4X(dev_priv))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		/* 'latency' is now in 0.1 us units for printing */
		seq_printf(m, "WM%d %u (%u.%u usec)\n",
			   level, wm[level], latency / 10, latency % 10);
	}

	drm_modeset_unlock_all(dev);
}
3632
3633 static int pri_wm_latency_show(struct seq_file *m, void *data)
3634 {
3635         struct drm_i915_private *dev_priv = m->private;
3636         const u16 *latencies;
3637
3638         if (INTEL_GEN(dev_priv) >= 9)
3639                 latencies = dev_priv->wm.skl_latency;
3640         else
3641                 latencies = dev_priv->wm.pri_latency;
3642
3643         wm_latency_show(m, latencies);
3644
3645         return 0;
3646 }
3647
3648 static int spr_wm_latency_show(struct seq_file *m, void *data)
3649 {
3650         struct drm_i915_private *dev_priv = m->private;
3651         const u16 *latencies;
3652
3653         if (INTEL_GEN(dev_priv) >= 9)
3654                 latencies = dev_priv->wm.skl_latency;
3655         else
3656                 latencies = dev_priv->wm.spr_latency;
3657
3658         wm_latency_show(m, latencies);
3659
3660         return 0;
3661 }
3662
3663 static int cur_wm_latency_show(struct seq_file *m, void *data)
3664 {
3665         struct drm_i915_private *dev_priv = m->private;
3666         const u16 *latencies;
3667
3668         if (INTEL_GEN(dev_priv) >= 9)
3669                 latencies = dev_priv->wm.skl_latency;
3670         else
3671                 latencies = dev_priv->wm.cur_latency;
3672
3673         wm_latency_show(m, latencies);
3674
3675         return 0;
3676 }
3677
3678 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3679 {
3680         struct drm_i915_private *dev_priv = inode->i_private;
3681
3682         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3683                 return -ENODEV;
3684
3685         return single_open(file, pri_wm_latency_show, dev_priv);
3686 }
3687
3688 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3689 {
3690         struct drm_i915_private *dev_priv = inode->i_private;
3691
3692         if (HAS_GMCH(dev_priv))
3693                 return -ENODEV;
3694
3695         return single_open(file, spr_wm_latency_show, dev_priv);
3696 }
3697
3698 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3699 {
3700         struct drm_i915_private *dev_priv = inode->i_private;
3701
3702         if (HAS_GMCH(dev_priv))
3703                 return -ENODEV;
3704
3705         return single_open(file, cur_wm_latency_show, dev_priv);
3706 }
3707
/*
 * wm_latency_write - common write handler for the WM latency debugfs
 * files: parse whitespace-separated u16 values from userspace and copy
 * them into @wm under the modeset locks.
 *
 * Exactly num_levels values must be supplied, otherwise -EINVAL.
 * The per-platform level count must stay in sync with wm_latency_show().
 */
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
				size_t len, loff_t *offp, u16 wm[8])
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct drm_device *dev = &dev_priv->drm;
	u16 new[8] = { 0 };
	int num_levels;
	int level;
	int ret;
	char tmp[32];

	if (IS_CHERRYVIEW(dev_priv))
		num_levels = 3;
	else if (IS_VALLEYVIEW(dev_priv))
		num_levels = 1;
	else if (IS_G4X(dev_priv))
		num_levels = 3;
	else
		num_levels = ilk_wm_max_level(dev_priv) + 1;

	/* leave room for the terminating NUL written below */
	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
		     &new[0], &new[1], &new[2], &new[3],
		     &new[4], &new[5], &new[6], &new[7]);
	if (ret != num_levels)
		return -EINVAL;

	drm_modeset_lock_all(dev);

	for (level = 0; level < num_levels; level++)
		wm[level] = new[level];

	drm_modeset_unlock_all(dev);

	return len;
}
3752
3753
3754 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3755                                     size_t len, loff_t *offp)
3756 {
3757         struct seq_file *m = file->private_data;
3758         struct drm_i915_private *dev_priv = m->private;
3759         u16 *latencies;
3760
3761         if (INTEL_GEN(dev_priv) >= 9)
3762                 latencies = dev_priv->wm.skl_latency;
3763         else
3764                 latencies = dev_priv->wm.pri_latency;
3765
3766         return wm_latency_write(file, ubuf, len, offp, latencies);
3767 }
3768
3769 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3770                                     size_t len, loff_t *offp)
3771 {
3772         struct seq_file *m = file->private_data;
3773         struct drm_i915_private *dev_priv = m->private;
3774         u16 *latencies;
3775
3776         if (INTEL_GEN(dev_priv) >= 9)
3777                 latencies = dev_priv->wm.skl_latency;
3778         else
3779                 latencies = dev_priv->wm.spr_latency;
3780
3781         return wm_latency_write(file, ubuf, len, offp, latencies);
3782 }
3783
3784 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3785                                     size_t len, loff_t *offp)
3786 {
3787         struct seq_file *m = file->private_data;
3788         struct drm_i915_private *dev_priv = m->private;
3789         u16 *latencies;
3790
3791         if (INTEL_GEN(dev_priv) >= 9)
3792                 latencies = dev_priv->wm.skl_latency;
3793         else
3794                 latencies = dev_priv->wm.cur_latency;
3795
3796         return wm_latency_write(file, ubuf, len, offp, latencies);
3797 }
3798
/* seq_file-backed debugfs file exposing primary-plane WM latencies. */
static const struct file_operations i915_pri_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = pri_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = pri_wm_latency_write
};
3807
/* seq_file-backed debugfs file exposing sprite-plane WM latencies. */
static const struct file_operations i915_spr_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = spr_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = spr_wm_latency_write
};
3816
/* seq_file-backed debugfs file exposing cursor-plane WM latencies. */
static const struct file_operations i915_cur_wm_latency_fops = {
        .owner = THIS_MODULE,
        .open = cur_wm_latency_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = cur_wm_latency_write
};
3825
3826 static int
3827 i915_wedged_get(void *data, u64 *val)
3828 {
3829         int ret = i915_terminally_wedged(data);
3830
3831         switch (ret) {
3832         case -EIO:
3833                 *val = 1;
3834                 return 0;
3835         case 0:
3836                 *val = 0;
3837                 return 0;
3838         default:
3839                 return ret;
3840         }
3841 }
3842
/*
 * debugfs write handler for i915_wedged: manually declare the engines in
 * @val hung and kick off error handling/reset for them.
 */
static int
i915_wedged_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        /* Flush any previous reset before applying for a new one */
        wait_event(i915->gpu_error.reset_queue,
                   !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));

        /* @val is treated as an engine mask by the error handler. */
        i915_handle_error(i915, val, I915_ERROR_CAPTURE,
                          "Manually set wedged engine mask = %llx", val);
        return 0;
}
3856
/* Simple attribute file wiring the wedged get/set handlers together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
                        i915_wedged_get, i915_wedged_set,
                        "%llu\n");
3860
/* Bitmask flags accepted by the i915_gem_drop_caches debugfs file. */
#define DROP_UNBOUND    BIT(0)  /* shrink unbound objects */
#define DROP_BOUND      BIT(1)  /* shrink bound objects */
#define DROP_RETIRE     BIT(2)  /* retire completed requests */
#define DROP_ACTIVE     BIT(3)  /* wait for the GPU to go idle first */
#define DROP_FREED      BIT(4)  /* drain the deferred-free object list */
#define DROP_SHRINK_ALL BIT(5)  /* run the full shrinker */
#define DROP_IDLE       BIT(6)  /* flush retire/idle workers until GT sleeps */
#define DROP_RESET_ACTIVE       BIT(7)  /* wedge if busy, then reset */
#define DROP_RESET_SEQNO        BIT(8)
#define DROP_ALL (DROP_UNBOUND  | \
                  DROP_BOUND    | \
                  DROP_RETIRE   | \
                  DROP_ACTIVE   | \
                  DROP_FREED    | \
                  DROP_SHRINK_ALL |\
                  DROP_IDLE     | \
                  DROP_RESET_ACTIVE | \
                  DROP_RESET_SEQNO)
/* Reading the file reports the full mask of supported DROP_* flags. */
static int
i915_drop_caches_get(void *data, u64 *val)
{
        *val = DROP_ALL;

        return 0;
}
3886
/*
 * debugfs write handler: drop the GEM caches selected by the DROP_* mask
 * in @val, typically used to force the driver into a known state before
 * or after a test.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
                  val, val & DROP_ALL);

        /* If the engines refuse to idle in time, declare the GPU wedged. */
        if (val & DROP_RESET_ACTIVE &&
            wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
                i915_gem_set_wedged(i915);

        /* No need to check and wait for gpu resets, only libdrm auto-restarts
         * on ioctls on -EAGAIN. */
        if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
                int ret;

                ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
                if (ret)
                        return ret;

                if (val & DROP_ACTIVE)
                        ret = i915_gem_wait_for_idle(i915,
                                                     I915_WAIT_INTERRUPTIBLE |
                                                     I915_WAIT_LOCKED,
                                                     MAX_SCHEDULE_TIMEOUT);

                if (val & DROP_RETIRE)
                        i915_retire_requests(i915);

                mutex_unlock(&i915->drm.struct_mutex);
        }

        /* Unwedge (via reset) if we wedged the device above. */
        if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
                i915_handle_error(i915, ALL_ENGINES, 0, NULL);

        /* Shrinking must run under the fs_reclaim context annotation. */
        fs_reclaim_acquire(GFP_KERNEL);
        if (val & DROP_BOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

        if (val & DROP_UNBOUND)
                i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

        if (val & DROP_SHRINK_ALL)
                i915_gem_shrink_all(i915);
        fs_reclaim_release(GFP_KERNEL);

        if (val & DROP_IDLE) {
                /* Keep flushing retire/idle work until the GT goes to sleep. */
                do {
                        if (READ_ONCE(i915->gt.active_requests))
                                flush_delayed_work(&i915->gt.retire_work);
                        drain_delayed_work(&i915->gt.idle_work);
                } while (READ_ONCE(i915->gt.awake));
        }

        if (val & DROP_FREED)
                i915_gem_drain_freed_objects(i915);

        return 0;
}
3947
/* Simple attribute file wiring the drop-caches get/set handlers together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
                        i915_drop_caches_get, i915_drop_caches_set,
                        "0x%08llx\n");
3951
/*
 * debugfs read handler: report the current cache-sharing policy field
 * from the GEN6_MBCUNIT_SNPCR register.  Only valid on gen6/7.
 */
static int
i915_cache_sharing_get(void *data, u64 *val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;
        u32 snpcr = 0; /* initialised in case the pm block is skipped */

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        /* Register access requires the device to be awake. */
        with_intel_runtime_pm(dev_priv, wakeref)
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);

        *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;

        return 0;
}
3969
/*
 * debugfs write handler: set the cache-sharing policy field (0-3) in the
 * GEN6_MBCUNIT_SNPCR register.  Only valid on gen6/7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        intel_wakeref_t wakeref;

        if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
                return -ENODEV;

        /* The SNPCR policy field is only two bits wide. */
        if (val > 3)
                return -EINVAL;

        DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
        with_intel_runtime_pm(dev_priv, wakeref) {
                u32 snpcr;

                /* Update the cache sharing policy here as well */
                snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
                snpcr &= ~GEN6_MBC_SNPCR_MASK;
                snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
                I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
        }

        return 0;
}
3995
/* Simple attribute file wiring the cache-sharing get/set handlers together. */
DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
                        i915_cache_sharing_get, i915_cache_sharing_set,
                        "%llu\n");
3999
/*
 * Decode the CHV power-gating signal registers into @sseu: which
 * subslices are powered and how many EUs are enabled in each.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
                                          struct sseu_dev_info *sseu)
{
#define SS_MAX 2
        const int ss_max = SS_MAX;
        u32 sig1[SS_MAX], sig2[SS_MAX];
        int ss;

        sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
        sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
        sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
        sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

        for (ss = 0; ss < ss_max; ss++) {
                unsigned int eu_cnt;

                if (sig1[ss] & CHV_SS_PG_ENABLE)
                        /* skip disabled subslice */
                        continue;

                /* CHV has a single slice. */
                sseu->slice_mask = BIT(0);
                sseu->subslice_mask[0] |= BIT(ss);
                /* Each *_PG_ENABLE bit gates a pair of EUs. */
                eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
                         ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
                         ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
                sseu->eu_total += eu_cnt;
                sseu->eu_per_subslice = max_t(unsigned int,
                                              sseu->eu_per_subslice, eu_cnt);
        }
#undef SS_MAX
}
4032
/*
 * Decode the gen10 slice/subslice power-gating ACK registers into @sseu:
 * enabled slices, per-slice subslice masks and EU counts.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
                                     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                /*
                 * FIXME: Valid SS Mask respects the spec and read
                 * only valid bits for those registers, excluding reserved
                 * although this seems wrong because it would leave many
                 * subslices without ACK.
                 */
                s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
                        GEN10_PGCTL_VALID_SS_MASK(s);
                eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
                eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bits covering EU pairs in the even (A) and odd (B) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);
                sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                /* skip disabled subslice */
                                continue;

                        /* Each ACK bit represents two EUs. */
                        eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
                                               eu_mask[ss % 2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4088
/*
 * Decode the gen9 slice/subslice power-gating ACK registers into @sseu.
 * On gen9 BC parts the subslice mask comes from static runtime info; on
 * gen9 LP it is derived from the per-subslice ACK bits.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
                                    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
        const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
        int s, ss;

        for (s = 0; s < info->sseu.max_slices; s++) {
                s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
                eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
                eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
        }

        /* ACK bits covering EU pairs in the even (A) and odd (B) subslices. */
        eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
                     GEN9_PGCTL_SSA_EU19_ACK |
                     GEN9_PGCTL_SSA_EU210_ACK |
                     GEN9_PGCTL_SSA_EU311_ACK;
        eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
                     GEN9_PGCTL_SSB_EU19_ACK |
                     GEN9_PGCTL_SSB_EU210_ACK |
                     GEN9_PGCTL_SSB_EU311_ACK;

        for (s = 0; s < info->sseu.max_slices; s++) {
                if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
                        /* skip disabled slice */
                        continue;

                sseu->slice_mask |= BIT(s);

                if (IS_GEN9_BC(dev_priv))
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];

                for (ss = 0; ss < info->sseu.max_subslices; ss++) {
                        unsigned int eu_cnt;

                        if (IS_GEN9_LP(dev_priv)) {
                                if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
                                        /* skip disabled subslice */
                                        continue;

                                sseu->subslice_mask[s] |= BIT(ss);
                        }

                        /* Each ACK bit represents two EUs. */
                        eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
                                               eu_mask[ss%2]);
                        sseu->eu_total += eu_cnt;
                        sseu->eu_per_subslice = max_t(unsigned int,
                                                      sseu->eu_per_subslice,
                                                      eu_cnt);
                }
        }
#undef SS_MAX
}
4144
/*
 * Fill @sseu for Broadwell from GEN8_GT_SLICE_INFO plus static runtime
 * info, then subtract the fused-off EUs reported in subslice_7eu.
 */
static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
                                         struct sseu_dev_info *sseu)
{
        u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
        int s;

        sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;

        if (sseu->slice_mask) {
                sseu->eu_per_subslice =
                        RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        sseu->subslice_mask[s] =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
                }
                sseu->eu_total = sseu->eu_per_subslice *
                                 sseu_subslice_total(sseu);

                /* subtract fused off EU(s) from enabled slice(s) */
                for (s = 0; s < fls(sseu->slice_mask); s++) {
                        u8 subslice_7eu =
                                RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];

                        sseu->eu_total -= hweight8(subslice_7eu);
                }
        }
}
4172
4173 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4174                                  const struct sseu_dev_info *sseu)
4175 {
4176         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4177         const char *type = is_available_info ? "Available" : "Enabled";
4178         int s;
4179
4180         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4181                    sseu->slice_mask);
4182         seq_printf(m, "  %s Slice Total: %u\n", type,
4183                    hweight8(sseu->slice_mask));
4184         seq_printf(m, "  %s Subslice Total: %u\n", type,
4185                    sseu_subslice_total(sseu));
4186         for (s = 0; s < fls(sseu->slice_mask); s++) {
4187                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4188                            s, hweight8(sseu->subslice_mask[s]));
4189         }
4190         seq_printf(m, "  %s EU Total: %u\n", type,
4191                    sseu->eu_total);
4192         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4193                    sseu->eu_per_subslice);
4194
4195         if (!is_available_info)
4196                 return;
4197
4198         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4199         if (HAS_POOLED_EU(dev_priv))
4200                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4201
4202         seq_printf(m, "  Has Slice Power Gating: %s\n",
4203                    yesno(sseu->has_slice_pg));
4204         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4205                    yesno(sseu->has_subslice_pg));
4206         seq_printf(m, "  Has EU Power Gating: %s\n",
4207                    yesno(sseu->has_eu_pg));
4208 }
4209
/*
 * debugfs show handler: dump both the static SSEU capabilities and the
 * live (power-gated) SSEU status, using the per-platform decoder.
 * Gen8+ only.
 */
static int i915_sseu_status(struct seq_file *m, void *unused)
{
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        struct sseu_dev_info sseu;
        intel_wakeref_t wakeref;

        if (INTEL_GEN(dev_priv) < 8)
                return -ENODEV;

        seq_puts(m, "SSEU Device Info\n");
        i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);

        seq_puts(m, "SSEU Device Status\n");
        memset(&sseu, 0, sizeof(sseu));
        /* Carry over the static limits; the decoders fill in the rest. */
        sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
        sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
        sseu.max_eus_per_subslice =
                RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;

        /* Register decoding requires the device to be awake. */
        with_intel_runtime_pm(dev_priv, wakeref) {
                if (IS_CHERRYVIEW(dev_priv))
                        cherryview_sseu_device_status(dev_priv, &sseu);
                else if (IS_BROADWELL(dev_priv))
                        broadwell_sseu_device_status(dev_priv, &sseu);
                else if (IS_GEN(dev_priv, 9))
                        gen9_sseu_device_status(dev_priv, &sseu);
                else if (INTEL_GEN(dev_priv) >= 10)
                        gen10_sseu_device_status(dev_priv, &sseu);
        }

        i915_print_sseu_info(m, false, &sseu);

        return 0;
}
4244
/*
 * Opening i915_forcewake_user grabs a runtime-pm wakeref and user
 * forcewake, keeping the hardware awake while the file is held open.
 * The wakeref cookie is stashed in file->private_data for release.
 */
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;

        /* Forcewake only exists on gen6+; silently succeed otherwise. */
        if (INTEL_GEN(i915) < 6)
                return 0;

        file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
        intel_uncore_forcewake_user_get(&i915->uncore);

        return 0;
}
4257
/*
 * Release the user forcewake and the runtime-pm wakeref taken in
 * i915_forcewake_open() (recovered from file->private_data).
 */
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
        struct drm_i915_private *i915 = inode->i_private;

        /* Nothing was taken on open for gen < 6. */
        if (INTEL_GEN(i915) < 6)
                return 0;

        intel_uncore_forcewake_user_put(&i915->uncore);
        intel_runtime_pm_put(i915,
                             (intel_wakeref_t)(uintptr_t)file->private_data);

        return 0;
}
4271
/* Open/close-only debugfs file: holding it open holds forcewake. */
static const struct file_operations i915_forcewake_fops = {
        .owner = THIS_MODULE,
        .open = i915_forcewake_open,
        .release = i915_forcewake_release,
};
4277
/*
 * debugfs show handler: report the HPD storm threshold and whether a
 * storm is currently detected (reenable work pending).
 */
static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;

        /* Synchronize with everything first in case there's been an HPD
         * storm, but we haven't finished handling it in the kernel yet
         */
        synchronize_irq(dev_priv->drm.irq);
        flush_work(&dev_priv->hotplug.dig_port_work);
        flush_work(&dev_priv->hotplug.hotplug_work);

        seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
        seq_printf(m, "Detected: %s\n",
                   yesno(delayed_work_pending(&hotplug->reenable_work)));

        return 0;
}
4296
/*
 * debugfs write handler: set the HPD storm detection threshold.
 * Accepts a decimal count, or the string "reset" to restore
 * HPD_STORM_DEFAULT_THRESHOLD.  Also clears per-pin storm stats.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
                                        const char __user *ubuf, size_t len,
                                        loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        unsigned int new_threshold;
        int i;
        char *newline;
        char tmp[16];

        /* Leave room for the terminating NUL. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        if (strcmp(tmp, "reset") == 0)
                new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
        else if (kstrtouint(tmp, 10, &new_threshold) != 0)
                return -EINVAL;

        if (new_threshold > 0)
                DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
                              new_threshold);
        else
                DRM_DEBUG_KMS("Disabling HPD storm detection\n");

        /* Threshold and stats are protected by the irq lock. */
        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_storm_threshold = new_threshold;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4345
/* single_open wrapper binding the show handler to the i915 instance. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4350
/* seq_file-backed debugfs file for the HPD storm threshold control. */
static const struct file_operations i915_hpd_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_storm_ctl_write
};
4359
/* debugfs show handler: report whether short-pulse HPD storm detection
 * is enabled.
 */
static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
{
        struct drm_i915_private *dev_priv = m->private;

        seq_printf(m, "Enabled: %s\n",
                   yesno(dev_priv->hotplug.hpd_short_storm_enabled));

        return 0;
}
4369
/* single_open wrapper binding the show handler to the i915 instance. */
static int
i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
{
        return single_open(file, i915_hpd_short_storm_ctl_show,
                           inode->i_private);
}
4376
/*
 * debugfs write handler: enable/disable short-pulse HPD storm detection.
 * Accepts a boolean, or "reset" to restore the platform default (enabled
 * unless the device has DP-MST).  Also clears per-pin storm stats.
 */
static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
                                              const char __user *ubuf,
                                              size_t len, loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct drm_i915_private *dev_priv = m->private;
        struct i915_hotplug *hotplug = &dev_priv->hotplug;
        char *newline;
        char tmp[16];
        int i;
        bool new_state;

        /* Leave room for the terminating NUL. */
        if (len >= sizeof(tmp))
                return -EINVAL;

        if (copy_from_user(tmp, ubuf, len))
                return -EFAULT;

        tmp[len] = '\0';

        /* Strip newline, if any */
        newline = strchr(tmp, '\n');
        if (newline)
                *newline = '\0';

        /* Reset to the "default" state for this system */
        if (strcmp(tmp, "reset") == 0)
                new_state = !HAS_DP_MST(dev_priv);
        else if (kstrtobool(tmp, &new_state) != 0)
                return -EINVAL;

        DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
                      new_state ? "En" : "Dis");

        /* Flag and stats are protected by the irq lock. */
        spin_lock_irq(&dev_priv->irq_lock);
        hotplug->hpd_short_storm_enabled = new_state;
        /* Reset the HPD storm stats so we don't accidentally trigger a storm */
        for_each_hpd_pin(i)
                hotplug->stats[i].count = 0;
        spin_unlock_irq(&dev_priv->irq_lock);

        /* Re-enable hpd immediately if we were in an irq storm */
        flush_delayed_work(&dev_priv->hotplug.reenable_work);

        return len;
}
4423
/* seq_file-backed debugfs file for the short-pulse HPD storm control. */
static const struct file_operations i915_hpd_short_storm_ctl_fops = {
        .owner = THIS_MODULE,
        .open = i915_hpd_short_storm_ctl_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
        .write = i915_hpd_short_storm_ctl_write,
};
4432
/*
 * debugfs write handler: manually enable (@val != 0) or disable DRRS on
 * every active CRTC that supports it.  Walks each CRTC under its modeset
 * lock, waits for any pending commit, then toggles DRRS on the attached
 * eDP encoder(s).  Gen7+ only.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
        struct drm_i915_private *dev_priv = data;
        struct drm_device *dev = &dev_priv->drm;
        struct intel_crtc *crtc;

        if (INTEL_GEN(dev_priv) < 7)
                return -ENODEV;

        for_each_intel_crtc(dev, crtc) {
                struct drm_connector_list_iter conn_iter;
                struct intel_crtc_state *crtc_state;
                struct drm_connector *connector;
                struct drm_crtc_commit *commit;
                int ret;

                ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
                if (ret)
                        return ret;

                crtc_state = to_intel_crtc_state(crtc->base.state);

                /* Nothing to do unless the CRTC is active with DRRS. */
                if (!crtc_state->base.active ||
                    !crtc_state->has_drrs)
                        goto out;

                /* Let any in-flight commit reach the hardware first. */
                commit = crtc_state->base.commit;
                if (commit) {
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (ret)
                                goto out;
                }

                drm_connector_list_iter_begin(dev, &conn_iter);
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_encoder *encoder;
                        struct intel_dp *intel_dp;

                        /* Only connectors driven by this CRTC. */
                        if (!(crtc_state->base.connector_mask &
                              drm_connector_mask(connector)))
                                continue;

                        /* DRRS is only toggled on eDP outputs. */
                        encoder = intel_attached_encoder(connector);
                        if (encoder->type != INTEL_OUTPUT_EDP)
                                continue;

                        DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
                                                val ? "en" : "dis", val);

                        intel_dp = enc_to_intel_dp(&encoder->base);
                        if (val)
                                intel_edp_drrs_enable(intel_dp,
                                                      crtc_state);
                        else
                                intel_edp_drrs_disable(intel_dp,
                                                       crtc_state);
                }
                drm_connector_list_iter_end(&conn_iter);

out:
                drm_modeset_unlock(&crtc->base.mutex);
                if (ret)
                        return ret;
        }

        return 0;
}
4500
/* Write-only attribute file: no get handler, set toggles DRRS. */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4502
/*
 * debugfs write handler: writing a truthy value re-arms FIFO underrun
 * reporting on every active CRTC (it is typically disabled after the
 * first underrun to avoid an interrupt storm) and resets the FBC
 * underrun state.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
                               const char __user *ubuf,
                               size_t cnt, loff_t *ppos)
{
        struct drm_i915_private *dev_priv = filp->private_data;
        struct intel_crtc *intel_crtc;
        struct drm_device *dev = &dev_priv->drm;
        int ret;
        bool reset;

        ret = kstrtobool_from_user(ubuf, cnt, &reset);
        if (ret)
                return ret;

        /* A falsy write is accepted but does nothing. */
        if (!reset)
                return cnt;

        for_each_intel_crtc(dev, intel_crtc) {
                struct drm_crtc_commit *commit;
                struct intel_crtc_state *crtc_state;

                ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
                if (ret)
                        return ret;

                /* Wait for any pending commit to fully land first. */
                crtc_state = to_intel_crtc_state(intel_crtc->base.state);
                commit = crtc_state->base.commit;
                if (commit) {
                        ret = wait_for_completion_interruptible(&commit->hw_done);
                        if (!ret)
                                ret = wait_for_completion_interruptible(&commit->flip_done);
                }

                if (!ret && crtc_state->base.active) {
                        DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
                                      pipe_name(intel_crtc->pipe));

                        intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
                }

                drm_modeset_unlock(&intel_crtc->base.mutex);

                if (ret)
                        return ret;
        }

        ret = intel_fbc_reset_underrun(dev_priv);
        if (ret)
                return ret;

        return cnt;
}
4556
/* Write-only debugfs file for re-arming FIFO underrun reporting. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .write = i915_fifo_underrun_reset_write,
        .llseek = default_llseek,
};
4563
4564 static const struct drm_info_list i915_debugfs_list[] = {
4565         {"i915_capabilities", i915_capabilities, 0},
4566         {"i915_gem_objects", i915_gem_object_info, 0},
4567         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4568         {"i915_gem_stolen", i915_gem_stolen_list_info },
4569         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4570         {"i915_gem_interrupt", i915_interrupt_info, 0},
4571         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4572         {"i915_guc_info", i915_guc_info, 0},
4573         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4574         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4575         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4576         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4577         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4578         {"i915_frequency_info", i915_frequency_info, 0},
4579         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4580         {"i915_reset_info", i915_reset_info, 0},
4581         {"i915_drpc_info", i915_drpc_info, 0},
4582         {"i915_emon_status", i915_emon_status, 0},
4583         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4584         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4585         {"i915_fbc_status", i915_fbc_status, 0},
4586         {"i915_ips_status", i915_ips_status, 0},
4587         {"i915_sr_status", i915_sr_status, 0},
4588         {"i915_opregion", i915_opregion, 0},
4589         {"i915_vbt", i915_vbt, 0},
4590         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4591         {"i915_context_status", i915_context_status, 0},
4592         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4593         {"i915_swizzle_info", i915_swizzle_info, 0},
4594         {"i915_llc", i915_llc, 0},
4595         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4596         {"i915_energy_uJ", i915_energy_uJ, 0},
4597         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4598         {"i915_power_domain_info", i915_power_domain_info, 0},
4599         {"i915_dmc_info", i915_dmc_info, 0},
4600         {"i915_display_info", i915_display_info, 0},
4601         {"i915_engine_info", i915_engine_info, 0},
4602         {"i915_rcs_topology", i915_rcs_topology, 0},
4603         {"i915_shrinker_info", i915_shrinker_info, 0},
4604         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4605         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4606         {"i915_wa_registers", i915_wa_registers, 0},
4607         {"i915_ddb_info", i915_ddb_info, 0},
4608         {"i915_sseu_status", i915_sseu_status, 0},
4609         {"i915_drrs_status", i915_drrs_status, 0},
4610         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4611 };
4612 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4613
/*
 * Writable debugfs entries: name/file_operations pairs registered by
 * i915_debugfs_register() with S_IRUGO | S_IWUSR.  The error-capture
 * files are only present when CONFIG_DRM_I915_CAPTURE_ERROR is enabled.
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4641
4642 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4643 {
4644         struct drm_minor *minor = dev_priv->drm.primary;
4645         struct dentry *ent;
4646         int i;
4647
4648         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4649                                   minor->debugfs_root, to_i915(minor->dev),
4650                                   &i915_forcewake_fops);
4651         if (!ent)
4652                 return -ENOMEM;
4653
4654         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4655                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4656                                           S_IRUGO | S_IWUSR,
4657                                           minor->debugfs_root,
4658                                           to_i915(minor->dev),
4659                                           i915_debugfs_files[i].fops);
4660                 if (!ent)
4661                         return -ENOMEM;
4662         }
4663
4664         return drm_debugfs_create_files(i915_debugfs_list,
4665                                         I915_DEBUGFS_ENTRIES,
4666                                         minor->debugfs_root, minor);
4667 }
4668
/*
 * One contiguous range of DPCD registers to dump through the i915_dpcd
 * debugfs file.  The range is bounded by .end (inclusive) when set,
 * otherwise by .size; with neither set a single byte is read.
 */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4679
/*
 * DPCD register ranges dumped by i915_dpcd_show().  Ranges with .edp
 * set would be skipped on non-eDP connectors (none are flagged here).
 */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4692
4693 static int i915_dpcd_show(struct seq_file *m, void *data)
4694 {
4695         struct drm_connector *connector = m->private;
4696         struct intel_dp *intel_dp =
4697                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4698         u8 buf[16];
4699         ssize_t err;
4700         int i;
4701
4702         if (connector->status != connector_status_connected)
4703                 return -ENODEV;
4704
4705         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4706                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4707                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4708
4709                 if (b->edp &&
4710                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4711                         continue;
4712
4713                 /* low tech for now */
4714                 if (WARN_ON(size > sizeof(buf)))
4715                         continue;
4716
4717                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4718                 if (err < 0)
4719                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4720                 else
4721                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4722         }
4723
4724         return 0;
4725 }
4726 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4727
4728 static int i915_panel_show(struct seq_file *m, void *data)
4729 {
4730         struct drm_connector *connector = m->private;
4731         struct intel_dp *intel_dp =
4732                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4733
4734         if (connector->status != connector_status_connected)
4735                 return -ENODEV;
4736
4737         seq_printf(m, "Panel power up delay: %d\n",
4738                    intel_dp->panel_power_up_delay);
4739         seq_printf(m, "Panel power down delay: %d\n",
4740                    intel_dp->panel_power_down_delay);
4741         seq_printf(m, "Backlight on delay: %d\n",
4742                    intel_dp->backlight_on_delay);
4743         seq_printf(m, "Backlight off delay: %d\n",
4744                    intel_dp->backlight_off_delay);
4745
4746         return 0;
4747 }
4748 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4749
4750 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4751 {
4752         struct drm_connector *connector = m->private;
4753         struct intel_connector *intel_connector = to_intel_connector(connector);
4754
4755         if (connector->status != connector_status_connected)
4756                 return -ENODEV;
4757
4758         /* HDCP is supported by connector */
4759         if (!intel_connector->hdcp.shim)
4760                 return -EINVAL;
4761
4762         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4763                    connector->base.id);
4764         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4765                    "None" : "HDCP1.4");
4766         seq_puts(m, "\n");
4767
4768         return 0;
4769 }
4770 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
4771
/*
 * Report DSC enable state plus DSC/FEC sink capability for the CRTC
 * currently driving this connector.  Locking follows the standard
 * drm_modeset_acquire_ctx pattern: take connection_mutex, then the
 * CRTC lock, backing off and retrying on -EDEADLK.
 */
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
	struct drm_connector *connector = m->private;
	struct drm_device *dev = connector->dev;
	struct drm_crtc *crtc;
	struct intel_dp *intel_dp;
	struct drm_modeset_acquire_ctx ctx;
	struct intel_crtc_state *crtc_state = NULL;
	int ret = 0;
	bool try_again = false;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

	do {
		try_again = false;
		ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
				       &ctx);
		if (ret) {
			/* Any failure on the first lock is reported as
			 * interrupted; no backoff is needed since no other
			 * lock is held yet.
			 */
			ret = -EINTR;
			break;
		}
		crtc = connector->state->crtc;
		if (connector->status != connector_status_connected || !crtc) {
			ret = -ENODEV;
			break;
		}
		ret = drm_modeset_lock(&crtc->mutex, &ctx);
		if (ret == -EDEADLK) {
			/* Drop all locks and retry the whole sequence. */
			ret = drm_modeset_backoff(&ctx);
			if (!ret) {
				try_again = true;
				continue;
			}
			break;
		} else if (ret) {
			break;
		}
		intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
		crtc_state = to_intel_crtc_state(crtc->state);
		seq_printf(m, "DSC_Enabled: %s\n",
			   yesno(crtc_state->dsc_params.compression_enable));
		seq_printf(m, "DSC_Sink_Support: %s\n",
			   yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
		/* FEC only matters for external DP links, not eDP. */
		if (!intel_dp_is_edp(intel_dp))
			seq_printf(m, "FEC_Sink_Support: %s\n",
				   yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
	} while (try_again);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
4825
4826 static ssize_t i915_dsc_fec_support_write(struct file *file,
4827                                           const char __user *ubuf,
4828                                           size_t len, loff_t *offp)
4829 {
4830         bool dsc_enable = false;
4831         int ret;
4832         struct drm_connector *connector =
4833                 ((struct seq_file *)file->private_data)->private;
4834         struct intel_encoder *encoder = intel_attached_encoder(connector);
4835         struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
4836
4837         if (len == 0)
4838                 return 0;
4839
4840         DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
4841                          len);
4842
4843         ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
4844         if (ret < 0)
4845                 return ret;
4846
4847         DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
4848                          (dsc_enable) ? "true" : "false");
4849         intel_dp->force_dsc_en = dsc_enable;
4850
4851         *offp += len;
4852         return len;
4853 }
4854
4855 static int i915_dsc_fec_support_open(struct inode *inode,
4856                                      struct file *file)
4857 {
4858         return single_open(file, i915_dsc_fec_support_show,
4859                            inode->i_private);
4860 }
4861
4862 static const struct file_operations i915_dsc_fec_support_fops = {
4863         .owner = THIS_MODULE,
4864         .open = i915_dsc_fec_support_open,
4865         .read = seq_read,
4866         .llseek = seq_lseek,
4867         .release = single_release,
4868         .write = i915_dsc_fec_support_write
4869 };
4870
4871 /**
4872  * i915_debugfs_connector_add - add i915 specific connector debugfs files
4873  * @connector: pointer to a registered drm_connector
4874  *
4875  * Cleanup will be done by drm_connector_unregister() through a call to
4876  * drm_debugfs_connector_remove().
4877  *
4878  * Returns 0 on success, negative error codes on error.
4879  */
4880 int i915_debugfs_connector_add(struct drm_connector *connector)
4881 {
4882         struct dentry *root = connector->debugfs_entry;
4883         struct drm_i915_private *dev_priv = to_i915(connector->dev);
4884
4885         /* The connector must have been registered beforehands. */
4886         if (!root)
4887                 return -ENODEV;
4888
4889         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4890             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4891                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
4892                                     connector, &i915_dpcd_fops);
4893
4894         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
4895                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
4896                                     connector, &i915_panel_fops);
4897                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
4898                                     connector, &i915_psr_sink_status_fops);
4899         }
4900
4901         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4902             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
4903             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
4904                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
4905                                     connector, &i915_hdcp_sink_capability_fops);
4906         }
4907
4908         if (INTEL_GEN(dev_priv) >= 10 &&
4909             (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
4910              connector->connector_type == DRM_MODE_CONNECTOR_eDP))
4911                 debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
4912                                     connector, &i915_dsc_fec_support_fops);
4913
4914         return 0;
4915 }