/*
 * drivers/gpu/drm/i915/i915_debugfs.c
 * (linux.git, commit 5cadfcd03ea944f5613bba61d82a677f1c6ac88a)
 */
1 /*
2  * Copyright © 2008 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Keith Packard <keithp@keithp.com>
26  *
27  */
28
29 #include <linux/debugfs.h>
30 #include <linux/sort.h>
31 #include <linux/sched/mm.h>
32 #include "intel_drv.h"
33 #include "intel_guc_submission.h"
34
/* Map a debugfs info node back to the i915 device that registered it. */
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
	return to_i915(node->minor->dev);
}
39
40 static int i915_capabilities(struct seq_file *m, void *data)
41 {
42         struct drm_i915_private *dev_priv = node_to_i915(m->private);
43         const struct intel_device_info *info = INTEL_INFO(dev_priv);
44         struct drm_printer p = drm_seq_file_printer(m);
45
46         seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
47         seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
48         seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
49
50         intel_device_info_dump_flags(info, &p);
51         intel_device_info_dump_runtime(info, &p);
52         intel_driver_caps_print(&dev_priv->caps, &p);
53
54         kernel_param_lock(THIS_MODULE);
55         i915_params_dump(&i915_modparams, &p);
56         kernel_param_unlock(THIS_MODULE);
57
58         return 0;
59 }
60
/* '*' while the object is still busy on the GPU, ' ' otherwise. */
static char get_active_flag(struct drm_i915_gem_object *obj)
{
	if (i915_gem_object_is_active(obj))
		return '*';

	return ' ';
}
65
66 static char get_pin_flag(struct drm_i915_gem_object *obj)
67 {
68         return obj->pin_global ? 'p' : ' ';
69 }
70
71 static char get_tiling_flag(struct drm_i915_gem_object *obj)
72 {
73         switch (i915_gem_object_get_tiling(obj)) {
74         default:
75         case I915_TILING_NONE: return ' ';
76         case I915_TILING_X: return 'X';
77         case I915_TILING_Y: return 'Y';
78         }
79 }
80
81 static char get_global_flag(struct drm_i915_gem_object *obj)
82 {
83         return obj->userfault_count ? 'g' : ' ';
84 }
85
86 static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
87 {
88         return obj->mm.mapping ? 'M' : ' ';
89 }
90
91 static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
92 {
93         u64 size = 0;
94         struct i915_vma *vma;
95
96         for_each_ggtt_vma(vma, obj) {
97                 if (drm_mm_node_allocated(&vma->node))
98                         size += vma->node.size;
99         }
100
101         return size;
102 }
103
104 static const char *
105 stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
106 {
107         size_t x = 0;
108
109         switch (page_sizes) {
110         case 0:
111                 return "";
112         case I915_GTT_PAGE_SIZE_4K:
113                 return "4K";
114         case I915_GTT_PAGE_SIZE_64K:
115                 return "64K";
116         case I915_GTT_PAGE_SIZE_2M:
117                 return "2M";
118         default:
119                 if (!buf)
120                         return "M";
121
122                 if (page_sizes & I915_GTT_PAGE_SIZE_2M)
123                         x += snprintf(buf + x, len - x, "2M, ");
124                 if (page_sizes & I915_GTT_PAGE_SIZE_64K)
125                         x += snprintf(buf + x, len - x, "64K, ");
126                 if (page_sizes & I915_GTT_PAGE_SIZE_4K)
127                         x += snprintf(buf + x, len - x, "4K, ");
128                 buf[x-2] = '\0';
129
130                 return buf;
131         }
132 }
133
/*
 * Print a one-line (plus per-vma detail) description of a GEM object:
 * status flags, size, read/write domains, cache level, every bound vma
 * (with GGTT view details), fence, stolen offset, last-write engine and
 * frontbuffer bits.  Caller must hold struct_mutex.
 */
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;
	struct i915_vma *vma;
	unsigned int frontbuffer_bits;
	int pin_count = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x %s%s%s",
		   &obj->base,
		   get_active_flag(obj),
		   get_pin_flag(obj),
		   get_tiling_flag(obj),
		   get_global_flag(obj),
		   get_pin_mapped_flag(obj),
		   obj->base.size / 1024,
		   obj->read_domains,
		   obj->write_domain,
		   i915_cache_level_str(dev_priv, obj->cache_level),
		   obj->mm.dirty ? " dirty" : "",
		   obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
	if (obj->base.name)
		seq_printf(m, " (name: %d)", obj->base.name);
	/* Count how many of the object's vmas are currently pinned. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (i915_vma_is_pinned(vma))
			pin_count++;
	}
	seq_printf(m, " (pinned x %d)", pin_count);
	if (obj->pin_global)
		seq_printf(m, " (global)");
	/* Describe each vma that actually has GTT space allocated. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx, pages: %s",
			   i915_vma_is_ggtt(vma) ? "g" : "pp",
			   vma->node.start, vma->node.size,
			   stringify_page_sizes(vma->page_sizes.gtt, NULL, 0));
		if (i915_vma_is_ggtt(vma)) {
			/* GGTT vmas carry a view (normal/partial/rotated). */
			switch (vma->ggtt_view.type) {
			case I915_GGTT_VIEW_NORMAL:
				seq_puts(m, ", normal");
				break;

			case I915_GGTT_VIEW_PARTIAL:
				seq_printf(m, ", partial [%08llx+%x]",
					   vma->ggtt_view.partial.offset << PAGE_SHIFT,
					   vma->ggtt_view.partial.size << PAGE_SHIFT);
				break;

			case I915_GGTT_VIEW_ROTATED:
				seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
					   vma->ggtt_view.rotated.plane[0].width,
					   vma->ggtt_view.rotated.plane[0].height,
					   vma->ggtt_view.rotated.plane[0].stride,
					   vma->ggtt_view.rotated.plane[0].offset,
					   vma->ggtt_view.rotated.plane[1].width,
					   vma->ggtt_view.rotated.plane[1].height,
					   vma->ggtt_view.rotated.plane[1].stride,
					   vma->ggtt_view.rotated.plane[1].offset);
				break;

			default:
				MISSING_CASE(vma->ggtt_view.type);
				break;
			}
		}
		if (vma->fence)
			seq_printf(m, " , fence: %d%s",
				   vma->fence->id,
				   i915_gem_active_isset(&vma->last_fence) ? "*" : "");
		seq_puts(m, ")");
	}
	if (obj->stolen)
		seq_printf(m, " (stolen: %08llx)", obj->stolen->start);

	engine = i915_gem_object_last_write_engine(obj);
	if (engine)
		seq_printf(m, " (%s)", engine->name);

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (frontbuffer_bits)
		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
221
222 static int obj_rank_by_stolen(const void *A, const void *B)
223 {
224         const struct drm_i915_gem_object *a =
225                 *(const struct drm_i915_gem_object **)A;
226         const struct drm_i915_gem_object *b =
227                 *(const struct drm_i915_gem_object **)B;
228
229         if (a->stolen->start < b->stolen->start)
230                 return -1;
231         if (a->stolen->start > b->stolen->start)
232                 return 1;
233         return 0;
234 }
235
/*
 * debugfs: list every object backed by stolen memory, sorted by offset
 * within the stolen region, followed by object/size totals.
 */
static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_i915_gem_object **objects;
	struct drm_i915_gem_object *obj;
	u64 total_obj_size, total_gtt_size;
	unsigned long total, count, n;
	int ret;

	/*
	 * Snapshot the object count first so the temporary array can be
	 * sized without holding any locks; the lists may change before
	 * obj_lock is taken, hence the "count == total" guards below.
	 */
	total = READ_ONCE(dev_priv->mm.object_count);
	objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
	if (!objects)
		return -ENOMEM;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	total_obj_size = total_gtt_size = count = 0;

	/* obj_lock protects the bound/unbound lists during the walk. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
		total_gtt_size += i915_gem_obj_total_ggtt_size(obj);

	}
	/* Unbound objects occupy no GTT space, so only object size is added. */
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		if (count == total)
			break;

		if (obj->stolen == NULL)
			continue;

		objects[count++] = obj;
		total_obj_size += obj->base.size;
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	/* Safe to sort/print outside obj_lock: we hold struct_mutex. */
	sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);

	seq_puts(m, "Stolen:\n");
	for (n = 0; n < count; n++) {
		seq_puts(m, "   ");
		describe_obj(m, objects[n]);
		seq_putc(m, '\n');
	}
	seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
		   count, total_obj_size, total_gtt_size);

	mutex_unlock(&dev->struct_mutex);
out:
	kvfree(objects);
	return ret;
}
298
/* Accumulated GEM memory accounting for one client (or one category). */
struct file_stats {
	struct drm_i915_file_private *file_priv; /* owner, used to filter ppgtt vmas */
	unsigned long count;	/* number of objects accounted */
	u64 total, unbound;	/* all objects / objects with no binding (bytes) */
	u64 global, shared;	/* GGTT-bound size / named or dma-buf exported size */
	u64 active, inactive;	/* bound size split by GPU activity */
};
306
/*
 * idr_for_each() callback: fold one object's sizes into the
 * struct file_stats passed via @data.  Requires struct_mutex held.
 */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct drm_i915_gem_object *obj = ptr;
	struct file_stats *stats = data;
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	stats->count++;
	stats->total += obj->base.size;
	if (!obj->bind_count)
		stats->unbound += obj->base.size;
	if (obj->base.name || obj->base.dma_buf)
		stats->shared += obj->base.size;

	/* Classify each allocated binding as global (GGTT) or per-process. */
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_ggtt(vma)) {
			stats->global += vma->node.size;
		} else {
			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);

			/* Only count ppgtt vmas belonging to this client. */
			if (ppgtt->vm.file != stats->file_priv)
				continue;
		}

		if (i915_vma_is_active(vma))
			stats->active += vma->node.size;
		else
			stats->inactive += vma->node.size;
	}

	return 0;
}
343
/*
 * Print one accounting line for @stats under the label @name.
 * Emits nothing when stats.count is zero.
 */
#define print_file_stats(m, name, stats) do { \
	if (stats.count) \
		seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
			   name, \
			   stats.count, \
			   stats.total, \
			   stats.active, \
			   stats.inactive, \
			   stats.global, \
			   stats.shared, \
			   stats.unbound); \
} while (0)
356
357 static void print_batch_pool_stats(struct seq_file *m,
358                                    struct drm_i915_private *dev_priv)
359 {
360         struct drm_i915_gem_object *obj;
361         struct file_stats stats;
362         struct intel_engine_cs *engine;
363         enum intel_engine_id id;
364         int j;
365
366         memset(&stats, 0, sizeof(stats));
367
368         for_each_engine(engine, dev_priv, id) {
369                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
370                         list_for_each_entry(obj,
371                                             &engine->batch_pool.cache_list[j],
372                                             batch_pool_link)
373                                 per_file_stats(0, obj, &stats);
374                 }
375         }
376
377         print_file_stats(m, "[k]batch pool", stats);
378 }
379
/*
 * idr_for_each() callback: account a context's per-engine state object
 * and ringbuffer object into the struct file_stats passed via @data.
 */
static int per_file_ctx_stats(int idx, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, ctx->i915, id) {
		struct intel_context *ce = to_intel_context(ctx, engine);

		if (ce->state)
			per_file_stats(0, ce->state->obj, data);
		if (ce->ring)
			per_file_stats(0, ce->ring->vma->obj, data);
	}

	return 0;
}
397
/*
 * Sum memory used by every context (the kernel context plus all
 * per-file contexts) and print it as a single "[k]contexts" line.
 * The debugfs caller holds filelist_mutex for the dev->filelist walk.
 */
static void print_context_stats(struct seq_file *m,
				struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct file_stats stats;
	struct drm_file *file;

	memset(&stats, 0, sizeof(stats));

	/* struct_mutex protects the context state walked below. */
	mutex_lock(&dev->struct_mutex);
	if (dev_priv->kernel_context)
		per_file_ctx_stats(0, dev_priv->kernel_context, &stats);

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *fpriv = file->driver_priv;
		idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
	}
	mutex_unlock(&dev->struct_mutex);

	print_file_stats(m, "[k]contexts", stats);
}
419
/*
 * debugfs: summarize GEM object memory usage — global totals, the
 * unbound and bound lists, purgeable/mapped/huge-page/display splits,
 * batch-pool usage, and a per-client accounting breakdown.
 */
static int i915_gem_object_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	u32 count, mapped_count, purgeable_count, dpy_count, huge_count;
	u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
	struct drm_i915_gem_object *obj;
	unsigned int page_sizes = 0;
	struct drm_file *file;
	char buf[80];
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	seq_printf(m, "%u objects, %llu bytes\n",
		   dev_priv->mm.object_count,
		   dev_priv->mm.object_memory);

	size = count = 0;
	mapped_size = mapped_count = 0;
	purgeable_size = purgeable_count = 0;
	huge_size = huge_count = 0;

	/* obj_lock protects the unbound/bound lists during both walks. */
	spin_lock(&dev_priv->mm.obj_lock);
	list_for_each_entry(obj, &dev_priv->mm.unbound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		/* Objects using pages larger than I915_GTT_PAGE_SIZE. */
		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

	/* Reset size/count; purgeable/mapped/huge keep accumulating. */
	size = count = dpy_size = dpy_count = 0;
	list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
		size += obj->base.size;
		++count;

		if (obj->pin_global) {
			dpy_size += obj->base.size;
			++dpy_count;
		}

		if (obj->mm.madv == I915_MADV_DONTNEED) {
			purgeable_size += obj->base.size;
			++purgeable_count;
		}

		if (obj->mm.mapping) {
			mapped_count++;
			mapped_size += obj->base.size;
		}

		if (obj->mm.page_sizes.sg > I915_GTT_PAGE_SIZE) {
			huge_count++;
			huge_size += obj->base.size;
			page_sizes |= obj->mm.page_sizes.sg;
		}
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	seq_printf(m, "%u bound objects, %llu bytes\n",
		   count, size);
	seq_printf(m, "%u purgeable objects, %llu bytes\n",
		   purgeable_count, purgeable_size);
	seq_printf(m, "%u mapped objects, %llu bytes\n",
		   mapped_count, mapped_size);
	seq_printf(m, "%u huge-paged objects (%s) %llu bytes\n",
		   huge_count,
		   stringify_page_sizes(page_sizes, buf, sizeof(buf)),
		   huge_size);
	seq_printf(m, "%u display objects (globally pinned), %llu bytes\n",
		   dpy_count, dpy_size);

	seq_printf(m, "%llu [%pa] gtt total\n",
		   ggtt->vm.total, &ggtt->mappable_end);
	seq_printf(m, "Supported page sizes: %s\n",
		   stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
					buf, sizeof(buf)));

	seq_putc(m, '\n');
	print_batch_pool_stats(m, dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/* filelist_mutex protects dev->filelist for the per-client walk. */
	mutex_lock(&dev->filelist_mutex);
	print_context_stats(m, dev_priv);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct file_stats stats;
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct i915_request *request;
		struct task_struct *task;

		mutex_lock(&dev->struct_mutex);

		memset(&stats, 0, sizeof(stats));
		stats.file_priv = file->driver_priv;
		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, per_file_stats, &stats);
		spin_unlock(&file->table_lock);
		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		request = list_first_entry_or_null(&file_priv->mm.request_list,
						   struct i915_request,
						   client_link);
		rcu_read_lock();
		task = pid_task(request && request->gem_context->pid ?
				request->gem_context->pid : file->pid,
				PIDTYPE_PID);
		print_file_stats(m, task ? task->comm : "<unknown>", stats);
		rcu_read_unlock();

		mutex_unlock(&dev->struct_mutex);
	}
	mutex_unlock(&dev->filelist_mutex);

	return 0;
}
557
558 static int i915_gem_gtt_info(struct seq_file *m, void *data)
559 {
560         struct drm_info_node *node = m->private;
561         struct drm_i915_private *dev_priv = node_to_i915(node);
562         struct drm_device *dev = &dev_priv->drm;
563         struct drm_i915_gem_object **objects;
564         struct drm_i915_gem_object *obj;
565         u64 total_obj_size, total_gtt_size;
566         unsigned long nobject, n;
567         int count, ret;
568
569         nobject = READ_ONCE(dev_priv->mm.object_count);
570         objects = kvmalloc_array(nobject, sizeof(*objects), GFP_KERNEL);
571         if (!objects)
572                 return -ENOMEM;
573
574         ret = mutex_lock_interruptible(&dev->struct_mutex);
575         if (ret)
576                 return ret;
577
578         count = 0;
579         spin_lock(&dev_priv->mm.obj_lock);
580         list_for_each_entry(obj, &dev_priv->mm.bound_list, mm.link) {
581                 objects[count++] = obj;
582                 if (count == nobject)
583                         break;
584         }
585         spin_unlock(&dev_priv->mm.obj_lock);
586
587         total_obj_size = total_gtt_size = 0;
588         for (n = 0;  n < count; n++) {
589                 obj = objects[n];
590
591                 seq_puts(m, "   ");
592                 describe_obj(m, obj);
593                 seq_putc(m, '\n');
594                 total_obj_size += obj->base.size;
595                 total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
596         }
597
598         mutex_unlock(&dev->struct_mutex);
599
600         seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
601                    count, total_obj_size, total_gtt_size);
602         kvfree(objects);
603
604         return 0;
605 }
606
607 static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
608 {
609         struct drm_i915_private *dev_priv = node_to_i915(m->private);
610         struct drm_device *dev = &dev_priv->drm;
611         struct drm_i915_gem_object *obj;
612         struct intel_engine_cs *engine;
613         enum intel_engine_id id;
614         int total = 0;
615         int ret, j;
616
617         ret = mutex_lock_interruptible(&dev->struct_mutex);
618         if (ret)
619                 return ret;
620
621         for_each_engine(engine, dev_priv, id) {
622                 for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
623                         int count;
624
625                         count = 0;
626                         list_for_each_entry(obj,
627                                             &engine->batch_pool.cache_list[j],
628                                             batch_pool_link)
629                                 count++;
630                         seq_printf(m, "%s cache[%d]: %d objects\n",
631                                    engine->name, j, count);
632
633                         list_for_each_entry(obj,
634                                             &engine->batch_pool.cache_list[j],
635                                             batch_pool_link) {
636                                 seq_puts(m, "   ");
637                                 describe_obj(m, obj);
638                                 seq_putc(m, '\n');
639                         }
640
641                         total += count;
642                 }
643         }
644
645         seq_printf(m, "total: %d\n", total);
646
647         mutex_unlock(&dev->struct_mutex);
648
649         return 0;
650 }
651
/*
 * Dump the gen8+ display-engine interrupt registers: per-pipe IMR/IIR/IER
 * (skipping powered-down pipes), then the port, misc and PCU sets.
 */
static void gen8_display_interrupt_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		/*
		 * Reading registers of a power-gated pipe would fault;
		 * only proceed if we can grab the pipe's power domain.
		 */
		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv,
							power_domain)) {
			seq_printf(m, "Pipe %c power disabled\n",
				   pipe_name(pipe));
			continue;
		}
		seq_printf(m, "Pipe %c IMR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IMR(pipe)));
		seq_printf(m, "Pipe %c IIR:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IIR(pipe)));
		seq_printf(m, "Pipe %c IER:\t%08x\n",
			   pipe_name(pipe),
			   I915_READ(GEN8_DE_PIPE_IER(pipe)));

		intel_display_power_put(dev_priv, power_domain);
	}

	seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IMR));
	seq_printf(m, "Display Engine port interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IIR));
	seq_printf(m, "Display Engine port interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_PORT_IER));

	seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IMR));
	seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IIR));
	seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n",
		   I915_READ(GEN8_DE_MISC_IER));

	seq_printf(m, "PCU interrupt mask:\t%08x\n",
		   I915_READ(GEN8_PCU_IMR));
	seq_printf(m, "PCU interrupt identity:\t%08x\n",
		   I915_READ(GEN8_PCU_IIR));
	seq_printf(m, "PCU interrupt enable:\t%08x\n",
		   I915_READ(GEN8_PCU_IER));
}
701
702 static int i915_interrupt_info(struct seq_file *m, void *data)
703 {
704         struct drm_i915_private *dev_priv = node_to_i915(m->private);
705         struct intel_engine_cs *engine;
706         enum intel_engine_id id;
707         int i, pipe;
708
709         intel_runtime_pm_get(dev_priv);
710
711         if (IS_CHERRYVIEW(dev_priv)) {
712                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
713                            I915_READ(GEN8_MASTER_IRQ));
714
715                 seq_printf(m, "Display IER:\t%08x\n",
716                            I915_READ(VLV_IER));
717                 seq_printf(m, "Display IIR:\t%08x\n",
718                            I915_READ(VLV_IIR));
719                 seq_printf(m, "Display IIR_RW:\t%08x\n",
720                            I915_READ(VLV_IIR_RW));
721                 seq_printf(m, "Display IMR:\t%08x\n",
722                            I915_READ(VLV_IMR));
723                 for_each_pipe(dev_priv, pipe) {
724                         enum intel_display_power_domain power_domain;
725
726                         power_domain = POWER_DOMAIN_PIPE(pipe);
727                         if (!intel_display_power_get_if_enabled(dev_priv,
728                                                                 power_domain)) {
729                                 seq_printf(m, "Pipe %c power disabled\n",
730                                            pipe_name(pipe));
731                                 continue;
732                         }
733
734                         seq_printf(m, "Pipe %c stat:\t%08x\n",
735                                    pipe_name(pipe),
736                                    I915_READ(PIPESTAT(pipe)));
737
738                         intel_display_power_put(dev_priv, power_domain);
739                 }
740
741                 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
742                 seq_printf(m, "Port hotplug:\t%08x\n",
743                            I915_READ(PORT_HOTPLUG_EN));
744                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
745                            I915_READ(VLV_DPFLIPSTAT));
746                 seq_printf(m, "DPINVGTT:\t%08x\n",
747                            I915_READ(DPINVGTT));
748                 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
749
750                 for (i = 0; i < 4; i++) {
751                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
752                                    i, I915_READ(GEN8_GT_IMR(i)));
753                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
754                                    i, I915_READ(GEN8_GT_IIR(i)));
755                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
756                                    i, I915_READ(GEN8_GT_IER(i)));
757                 }
758
759                 seq_printf(m, "PCU interrupt mask:\t%08x\n",
760                            I915_READ(GEN8_PCU_IMR));
761                 seq_printf(m, "PCU interrupt identity:\t%08x\n",
762                            I915_READ(GEN8_PCU_IIR));
763                 seq_printf(m, "PCU interrupt enable:\t%08x\n",
764                            I915_READ(GEN8_PCU_IER));
765         } else if (INTEL_GEN(dev_priv) >= 11) {
766                 seq_printf(m, "Master Interrupt Control:  %08x\n",
767                            I915_READ(GEN11_GFX_MSTR_IRQ));
768
769                 seq_printf(m, "Render/Copy Intr Enable:   %08x\n",
770                            I915_READ(GEN11_RENDER_COPY_INTR_ENABLE));
771                 seq_printf(m, "VCS/VECS Intr Enable:      %08x\n",
772                            I915_READ(GEN11_VCS_VECS_INTR_ENABLE));
773                 seq_printf(m, "GUC/SG Intr Enable:\t   %08x\n",
774                            I915_READ(GEN11_GUC_SG_INTR_ENABLE));
775                 seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n",
776                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE));
777                 seq_printf(m, "Crypto Intr Enable:\t   %08x\n",
778                            I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE));
779                 seq_printf(m, "GUnit/CSME Intr Enable:\t   %08x\n",
780                            I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE));
781
782                 seq_printf(m, "Display Interrupt Control:\t%08x\n",
783                            I915_READ(GEN11_DISPLAY_INT_CTL));
784
785                 gen8_display_interrupt_info(m);
786         } else if (INTEL_GEN(dev_priv) >= 8) {
787                 seq_printf(m, "Master Interrupt Control:\t%08x\n",
788                            I915_READ(GEN8_MASTER_IRQ));
789
790                 for (i = 0; i < 4; i++) {
791                         seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
792                                    i, I915_READ(GEN8_GT_IMR(i)));
793                         seq_printf(m, "GT Interrupt IIR %d:\t%08x\n",
794                                    i, I915_READ(GEN8_GT_IIR(i)));
795                         seq_printf(m, "GT Interrupt IER %d:\t%08x\n",
796                                    i, I915_READ(GEN8_GT_IER(i)));
797                 }
798
799                 gen8_display_interrupt_info(m);
800         } else if (IS_VALLEYVIEW(dev_priv)) {
801                 seq_printf(m, "Display IER:\t%08x\n",
802                            I915_READ(VLV_IER));
803                 seq_printf(m, "Display IIR:\t%08x\n",
804                            I915_READ(VLV_IIR));
805                 seq_printf(m, "Display IIR_RW:\t%08x\n",
806                            I915_READ(VLV_IIR_RW));
807                 seq_printf(m, "Display IMR:\t%08x\n",
808                            I915_READ(VLV_IMR));
809                 for_each_pipe(dev_priv, pipe) {
810                         enum intel_display_power_domain power_domain;
811
812                         power_domain = POWER_DOMAIN_PIPE(pipe);
813                         if (!intel_display_power_get_if_enabled(dev_priv,
814                                                                 power_domain)) {
815                                 seq_printf(m, "Pipe %c power disabled\n",
816                                            pipe_name(pipe));
817                                 continue;
818                         }
819
820                         seq_printf(m, "Pipe %c stat:\t%08x\n",
821                                    pipe_name(pipe),
822                                    I915_READ(PIPESTAT(pipe)));
823                         intel_display_power_put(dev_priv, power_domain);
824                 }
825
826                 seq_printf(m, "Master IER:\t%08x\n",
827                            I915_READ(VLV_MASTER_IER));
828
829                 seq_printf(m, "Render IER:\t%08x\n",
830                            I915_READ(GTIER));
831                 seq_printf(m, "Render IIR:\t%08x\n",
832                            I915_READ(GTIIR));
833                 seq_printf(m, "Render IMR:\t%08x\n",
834                            I915_READ(GTIMR));
835
836                 seq_printf(m, "PM IER:\t\t%08x\n",
837                            I915_READ(GEN6_PMIER));
838                 seq_printf(m, "PM IIR:\t\t%08x\n",
839                            I915_READ(GEN6_PMIIR));
840                 seq_printf(m, "PM IMR:\t\t%08x\n",
841                            I915_READ(GEN6_PMIMR));
842
843                 seq_printf(m, "Port hotplug:\t%08x\n",
844                            I915_READ(PORT_HOTPLUG_EN));
845                 seq_printf(m, "DPFLIPSTAT:\t%08x\n",
846                            I915_READ(VLV_DPFLIPSTAT));
847                 seq_printf(m, "DPINVGTT:\t%08x\n",
848                            I915_READ(DPINVGTT));
849
850         } else if (!HAS_PCH_SPLIT(dev_priv)) {
851                 seq_printf(m, "Interrupt enable:    %08x\n",
852                            I915_READ(IER));
853                 seq_printf(m, "Interrupt identity:  %08x\n",
854                            I915_READ(IIR));
855                 seq_printf(m, "Interrupt mask:      %08x\n",
856                            I915_READ(IMR));
857                 for_each_pipe(dev_priv, pipe)
858                         seq_printf(m, "Pipe %c stat:         %08x\n",
859                                    pipe_name(pipe),
860                                    I915_READ(PIPESTAT(pipe)));
861         } else {
862                 seq_printf(m, "North Display Interrupt enable:          %08x\n",
863                            I915_READ(DEIER));
864                 seq_printf(m, "North Display Interrupt identity:        %08x\n",
865                            I915_READ(DEIIR));
866                 seq_printf(m, "North Display Interrupt mask:            %08x\n",
867                            I915_READ(DEIMR));
868                 seq_printf(m, "South Display Interrupt enable:          %08x\n",
869                            I915_READ(SDEIER));
870                 seq_printf(m, "South Display Interrupt identity:        %08x\n",
871                            I915_READ(SDEIIR));
872                 seq_printf(m, "South Display Interrupt mask:            %08x\n",
873                            I915_READ(SDEIMR));
874                 seq_printf(m, "Graphics Interrupt enable:               %08x\n",
875                            I915_READ(GTIER));
876                 seq_printf(m, "Graphics Interrupt identity:             %08x\n",
877                            I915_READ(GTIIR));
878                 seq_printf(m, "Graphics Interrupt mask:         %08x\n",
879                            I915_READ(GTIMR));
880         }
881
882         if (INTEL_GEN(dev_priv) >= 11) {
883                 seq_printf(m, "RCS Intr Mask:\t %08x\n",
884                            I915_READ(GEN11_RCS0_RSVD_INTR_MASK));
885                 seq_printf(m, "BCS Intr Mask:\t %08x\n",
886                            I915_READ(GEN11_BCS_RSVD_INTR_MASK));
887                 seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n",
888                            I915_READ(GEN11_VCS0_VCS1_INTR_MASK));
889                 seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n",
890                            I915_READ(GEN11_VCS2_VCS3_INTR_MASK));
891                 seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n",
892                            I915_READ(GEN11_VECS0_VECS1_INTR_MASK));
893                 seq_printf(m, "GUC/SG Intr Mask:\t %08x\n",
894                            I915_READ(GEN11_GUC_SG_INTR_MASK));
895                 seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n",
896                            I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK));
897                 seq_printf(m, "Crypto Intr Mask:\t %08x\n",
898                            I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK));
899                 seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n",
900                            I915_READ(GEN11_GUNIT_CSME_INTR_MASK));
901
902         } else if (INTEL_GEN(dev_priv) >= 6) {
903                 for_each_engine(engine, dev_priv, id) {
904                         seq_printf(m,
905                                    "Graphics Interrupt mask (%s):       %08x\n",
906                                    engine->name, I915_READ_IMR(engine));
907                 }
908         }
909
910         intel_runtime_pm_put(dev_priv);
911
912         return 0;
913 }
914
915 static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
916 {
917         struct drm_i915_private *dev_priv = node_to_i915(m->private);
918         struct drm_device *dev = &dev_priv->drm;
919         int i, ret;
920
921         ret = mutex_lock_interruptible(&dev->struct_mutex);
922         if (ret)
923                 return ret;
924
925         seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
926         for (i = 0; i < dev_priv->num_fence_regs; i++) {
927                 struct i915_vma *vma = dev_priv->fence_regs[i].vma;
928
929                 seq_printf(m, "Fence %d, pin count = %d, object = ",
930                            i, dev_priv->fence_regs[i].pin_count);
931                 if (!vma)
932                         seq_puts(m, "unused");
933                 else
934                         describe_obj(m, vma->obj);
935                 seq_putc(m, '\n');
936         }
937
938         mutex_unlock(&dev->struct_mutex);
939         return 0;
940 }
941
942 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
943 static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
944                               size_t count, loff_t *pos)
945 {
946         struct i915_gpu_state *error = file->private_data;
947         struct drm_i915_error_state_buf str;
948         ssize_t ret;
949         loff_t tmp;
950
951         if (!error)
952                 return 0;
953
954         ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
955         if (ret)
956                 return ret;
957
958         ret = i915_error_state_to_str(&str, error);
959         if (ret)
960                 goto out;
961
962         tmp = 0;
963         ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
964         if (ret < 0)
965                 goto out;
966
967         *pos = str.start + ret;
968 out:
969         i915_error_state_buf_release(&str);
970         return ret;
971 }
972
973 static int gpu_state_release(struct inode *inode, struct file *file)
974 {
975         i915_gpu_state_put(file->private_data);
976         return 0;
977 }
978
979 static int i915_gpu_info_open(struct inode *inode, struct file *file)
980 {
981         struct drm_i915_private *i915 = inode->i_private;
982         struct i915_gpu_state *gpu;
983
984         intel_runtime_pm_get(i915);
985         gpu = i915_capture_gpu_state(i915);
986         intel_runtime_pm_put(i915);
987         if (!gpu)
988                 return -ENOMEM;
989
990         file->private_data = gpu;
991         return 0;
992 }
993
/* debugfs i915_gpu_info: snapshot taken on open, readable, read-only. */
static const struct file_operations i915_gpu_info_fops = {
	.owner = THIS_MODULE,
	.open = i915_gpu_info_open,
	.read = gpu_state_read,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1001
/*
 * Writing anything to the error-state file discards the captured error
 * state.  The payload is ignored; the full count is reported as written.
 */
static ssize_t
i915_error_state_write(struct file *filp,
		       const char __user *ubuf,
		       size_t cnt,
		       loff_t *ppos)
{
	struct i915_gpu_state *error = filp->private_data;

	/* No error state was captured at open time: nothing to reset. */
	if (!error)
		return 0;

	DRM_DEBUG_DRIVER("Resetting error state\n");
	i915_reset_error_state(error->i915);

	return cnt;
}
1018
/* Open: take a reference to the first captured error state (may be NULL). */
static int i915_error_state_open(struct inode *inode, struct file *file)
{
	file->private_data = i915_first_error_state(inode->i_private);
	return 0;
}
1024
/* debugfs i915_error_state: read the capture, write anything to clear it. */
static const struct file_operations i915_error_state_fops = {
	.owner = THIS_MODULE,
	.open = i915_error_state_open,
	.read = gpu_state_read,
	.write = i915_error_state_write,
	.llseek = default_llseek,
	.release = gpu_state_release,
};
1033 #endif
1034
/*
 * Debugfs setter for the global GEM seqno.  Takes struct_mutex
 * (interruptibly) around the update and holds a runtime-pm wakeref
 * across the call into i915_gem_set_global_seqno().
 */
static int
i915_next_seqno_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(dev_priv);
	ret = i915_gem_set_global_seqno(dev, val);
	intel_runtime_pm_put(dev_priv);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
1054
/* Write-only attribute (NULL getter): accepts a hex seqno from userspace. */
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
			NULL, i915_next_seqno_set,
			"0x%llx\n");
1058
/*
 * i915_frequency_info - debugfs dump of GPU frequency / P-state status.
 *
 * Selects a decode path per platform generation (ILK, VLV/CHV, gen6+)
 * and prints the requested/current/min/max/idle/efficient frequencies
 * plus the raw RPS registers.  A runtime-pm wakeref is held for the
 * whole function so the registers are readable.  Always returns 0
 * (ret is initialised to 0 and never changed).
 */
static int i915_frequency_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int ret = 0;

	intel_runtime_pm_get(dev_priv);

	if (IS_GEN5(dev_priv)) {
		/* Ironlake: legacy MEMSWCTL/MEMSTAT P-state interface. */
		u16 rgvswctl = I915_READ16(MEMSWCTL);
		u16 rgvstat = I915_READ16(MEMSTAT_ILK);

		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
			   MEMSTAT_VID_SHIFT);
		seq_printf(m, "Current P-state: %d\n",
			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		u32 rpmodectl, freq_sts;

		/* Punit mailbox accesses are serialised by pcu_lock. */
		mutex_lock(&dev_priv->pcu_lock);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
		seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);

		seq_printf(m, "actual GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));

		seq_printf(m, "current GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));

		seq_printf(m, "max GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "min GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));

		seq_printf(m, "idle GPU freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));

		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
		mutex_unlock(&dev_priv->pcu_lock);
	} else if (INTEL_GEN(dev_priv) >= 6) {
		u32 rp_state_limits;
		u32 gt_perf_status;
		u32 rp_state_cap;
		u32 rpmodectl, rpinclimit, rpdeclimit;
		u32 rpstat, cagf, reqf;
		u32 rpupei, rpcurup, rpprevup;
		u32 rpdownei, rpcurdown, rpprevdown;
		u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
		int max_freq;

		rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
		/* Broxton-class parts keep the caps in different registers. */
		if (IS_GEN9_LP(dev_priv)) {
			rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
			gt_perf_status = I915_READ(BXT_GT_PERF_STATUS);
		} else {
			rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
			gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
		}

		/* RPSTAT1 is in the GT power well */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

		/* The requested-frequency field moved/shrank across gens. */
		reqf = I915_READ(GEN6_RPNSWREQ);
		if (INTEL_GEN(dev_priv) >= 9)
			reqf >>= 23;
		else {
			reqf &= ~GEN6_TURBO_DISABLE;
			if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
				reqf >>= 24;
			else
				reqf >>= 25;
		}
		reqf = intel_gpu_freq(dev_priv, reqf);

		rpmodectl = I915_READ(GEN6_RP_CONTROL);
		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
		rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

		rpstat = I915_READ(GEN6_RPSTAT1);
		rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
		rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
		rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
		cagf = intel_gpu_freq(dev_priv,
				      intel_get_cagf(dev_priv, rpstat));

		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		if (INTEL_GEN(dev_priv) >= 11) {
			pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
			pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK);
			/*
			 * The equivalent to the PM ISR & IIR cannot be read
			 * without affecting the current state of the system
			 */
			pm_isr = 0;
			pm_iir = 0;
		} else if (INTEL_GEN(dev_priv) >= 8) {
			pm_ier = I915_READ(GEN8_GT_IER(2));
			pm_imr = I915_READ(GEN8_GT_IMR(2));
			pm_isr = I915_READ(GEN8_GT_ISR(2));
			pm_iir = I915_READ(GEN8_GT_IIR(2));
		} else {
			pm_ier = I915_READ(GEN6_PMIER);
			pm_imr = I915_READ(GEN6_PMIMR);
			pm_isr = I915_READ(GEN6_PMISR);
			pm_iir = I915_READ(GEN6_PMIIR);
		}
		pm_mask = I915_READ(GEN6_PMINTRMSK);

		seq_printf(m, "Video Turbo Mode: %s\n",
			   yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
		seq_printf(m, "HW control enabled: %s\n",
			   yesno(rpmodectl & GEN6_RP_ENABLE));
		seq_printf(m, "SW control enabled: %s\n",
			   yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
				  GEN6_RP_MEDIA_SW_MODE));

		seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
			   pm_ier, pm_imr, pm_mask);
		if (INTEL_GEN(dev_priv) <= 10)
			seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
				   pm_isr, pm_iir);
		seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
			   rps->pm_intrmsk_mbz);
		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
		seq_printf(m, "Render p-state ratio: %d\n",
			   (gt_perf_status & (INTEL_GEN(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
		seq_printf(m, "Render p-state VID: %d\n",
			   gt_perf_status & 0xff);
		seq_printf(m, "Render p-state limit: %d\n",
			   rp_state_limits & 0xff);
		seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
		seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
		seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
		seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
		seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
		seq_printf(m, "CAGF: %dMHz\n", cagf);
		seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
			   rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
		seq_printf(m, "RP CUR UP: %d (%dus)\n",
			   rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
		seq_printf(m, "RP PREV UP: %d (%dus)\n",
			   rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
		seq_printf(m, "Up threshold: %d%%\n",
			   rps->power.up_threshold);

		seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
			   rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
		seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
			   rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
		seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
			   rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
		seq_printf(m, "Down threshold: %d%%\n",
			   rps->power.down_threshold);

		/* RPN/RP1/RP0 live in rp_state_cap; BXT swaps byte order. */
		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
			    rp_state_cap >> 16) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (rp_state_cap & 0xff00) >> 8;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));

		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
			    rp_state_cap >> 0) & 0xff;
		max_freq *= (IS_GEN9_BC(dev_priv) ||
			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, max_freq));
		seq_printf(m, "Max overclocked frequency: %dMHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));

		seq_printf(m, "Current freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->cur_freq));
		seq_printf(m, "Actual freq: %d MHz\n", cagf);
		seq_printf(m, "Idle freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->idle_freq));
		seq_printf(m, "Min freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->min_freq));
		seq_printf(m, "Boost freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->boost_freq));
		seq_printf(m, "Max freq: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->max_freq));
		seq_printf(m,
			   "efficient (RPe) frequency: %d MHz\n",
			   intel_gpu_freq(dev_priv, rps->efficient_freq));
	} else {
		seq_puts(m, "no P-state info available\n");
	}

	/* CD/dot clock limits are printed regardless of platform. */
	seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
	seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
	seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);

	intel_runtime_pm_put(dev_priv);
	return ret;
}
1280
1281 static void i915_instdone_info(struct drm_i915_private *dev_priv,
1282                                struct seq_file *m,
1283                                struct intel_instdone *instdone)
1284 {
1285         int slice;
1286         int subslice;
1287
1288         seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
1289                    instdone->instdone);
1290
1291         if (INTEL_GEN(dev_priv) <= 3)
1292                 return;
1293
1294         seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
1295                    instdone->slice_common);
1296
1297         if (INTEL_GEN(dev_priv) <= 6)
1298                 return;
1299
1300         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1301                 seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
1302                            slice, subslice, instdone->sampler[slice][subslice]);
1303
1304         for_each_instdone_slice_subslice(dev_priv, slice, subslice)
1305                 seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
1306                            slice, subslice, instdone->row[slice][subslice]);
1307 }
1308
/*
 * i915_hangcheck_info - debugfs dump of hangcheck state.
 *
 * Prints the global wedge/reset flags, whether the hangcheck timer is
 * armed, and per engine: seqno/ACTHD progress versus the hangcheck
 * snapshot, the tasks sleeping on the breadcrumb tree, and the last
 * hangcheck verdict.  Instdone is dumped for the render engine only.
 */
static int i915_hangcheck_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_engine_cs *engine;
	u64 acthd[I915_NUM_ENGINES];
	u32 seqno[I915_NUM_ENGINES];
	struct intel_instdone instdone;
	enum intel_engine_id id;

	/* Global error/reset bookkeeping first. */
	if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
		seq_puts(m, "Wedged\n");
	if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: struct_mutex backoff\n");
	if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
		seq_puts(m, "Reset in progress: reset handoff to waiter\n");
	if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
		seq_puts(m, "Waiter holding struct mutex\n");
	if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
		seq_puts(m, "struct_mutex blocked for reset\n");

	if (!i915_modparams.enable_hangcheck) {
		seq_puts(m, "Hangcheck disabled\n");
		return 0;
	}

	/* Sample hardware state under a runtime-pm wakeref. */
	intel_runtime_pm_get(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		acthd[id] = intel_engine_get_active_head(engine);
		seqno[id] = intel_engine_get_seqno(engine);
	}

	/* Instdone is only read for the render engine (RCS). */
	intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);

	intel_runtime_pm_put(dev_priv);

	if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
			   jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
					    jiffies));
	else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
		seq_puts(m, "Hangcheck active, work pending\n");
	else
		seq_puts(m, "Hangcheck inactive\n");

	seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));

	for_each_engine(engine, dev_priv, id) {
		struct intel_breadcrumbs *b = &engine->breadcrumbs;
		struct rb_node *rb;

		seq_printf(m, "%s:\n", engine->name);
		seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
			   engine->hangcheck.seqno, seqno[id],
			   intel_engine_last_submit(engine));
		seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s, wedged? %s\n",
			   yesno(intel_engine_has_waiter(engine)),
			   yesno(test_bit(engine->id,
					  &dev_priv->gpu_error.missed_irq_rings)),
			   yesno(engine->hangcheck.stalled),
			   yesno(engine->hangcheck.wedged));

		/* Walk the waiter rbtree under its lock to list sleepers. */
		spin_lock_irq(&b->rb_lock);
		for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
			struct intel_wait *w = rb_entry(rb, typeof(*w), node);

			seq_printf(m, "\t%s [%d] waiting for %x\n",
				   w->tsk->comm, w->tsk->pid, w->seqno);
		}
		spin_unlock_irq(&b->rb_lock);

		seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
			   (long long)engine->hangcheck.acthd,
			   (long long)acthd[id]);
		seq_printf(m, "\taction = %s(%d) %d ms ago\n",
			   hangcheck_action_to_str(engine->hangcheck.action),
			   engine->hangcheck.action,
			   jiffies_to_msecs(jiffies -
					    engine->hangcheck.action_timestamp));

		if (engine->id == RCS) {
			seq_puts(m, "\tinstdone read =\n");

			i915_instdone_info(dev_priv, m, &instdone);

			seq_puts(m, "\tinstdone accu =\n");

			i915_instdone_info(dev_priv, m,
					   &engine->hangcheck.instdone);
		}
	}

	return 0;
}
1403
1404 static int i915_reset_info(struct seq_file *m, void *unused)
1405 {
1406         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1407         struct i915_gpu_error *error = &dev_priv->gpu_error;
1408         struct intel_engine_cs *engine;
1409         enum intel_engine_id id;
1410
1411         seq_printf(m, "full gpu reset = %u\n", i915_reset_count(error));
1412
1413         for_each_engine(engine, dev_priv, id) {
1414                 seq_printf(m, "%s = %u\n", engine->name,
1415                            i915_reset_engine_count(error, engine));
1416         }
1417
1418         return 0;
1419 }
1420
/*
 * ironlake_drpc_info - dump Ironlake render-standby (DRPC) state.
 *
 * Decodes MEMMODECTL (boost/P-state control), RSTDBYCTL (render
 * standby) and CRSTANDVID (RS1/RS2 voltage IDs) into readable form.
 * Always returns 0.
 */
static int ironlake_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rgvmodectl, rstdbyctl;
	u16 crstandvid;

	rgvmodectl = I915_READ(MEMMODECTL);
	rstdbyctl = I915_READ(RSTDBYCTL);
	crstandvid = I915_READ16(CRSTANDVID);

	seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
	seq_printf(m, "Boost freq: %d\n",
		   (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
		   MEMMODE_BOOST_FREQ_SHIFT);
	seq_printf(m, "HW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
	seq_printf(m, "SW control enabled: %s\n",
		   yesno(rgvmodectl & MEMMODE_SWMODE_EN));
	seq_printf(m, "Gated voltage change: %s\n",
		   yesno(rgvmodectl & MEMMODE_RCLK_GATE));
	seq_printf(m, "Starting frequency: P%d\n",
		   (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
	seq_printf(m, "Max P-state: P%d\n",
		   (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
	/* FMIN occupies the low bits of MEMMODECTL, so no shift needed. */
	seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
	seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
	seq_printf(m, "Render standby enabled: %s\n",
		   yesno(!(rstdbyctl & RCX_SW_EXIT)));
	seq_puts(m, "Current RS state: ");
	/* Decode the current render-standby (RSx) status field. */
	switch (rstdbyctl & RSX_STATUS_MASK) {
	case RSX_STATUS_ON:
		seq_puts(m, "on\n");
		break;
	case RSX_STATUS_RC1:
		seq_puts(m, "RC1\n");
		break;
	case RSX_STATUS_RC1E:
		seq_puts(m, "RC1E\n");
		break;
	case RSX_STATUS_RS1:
		seq_puts(m, "RS1\n");
		break;
	case RSX_STATUS_RS2:
		seq_puts(m, "RS2 (RC6)\n");
		break;
	case RSX_STATUS_RS3:
		seq_puts(m, "RC3 (RC6+)\n");
		break;
	default:
		seq_puts(m, "unknown\n");
		break;
	}

	return 0;
}
1477
1478 static int i915_forcewake_domains(struct seq_file *m, void *data)
1479 {
1480         struct drm_i915_private *i915 = node_to_i915(m->private);
1481         struct intel_uncore_forcewake_domain *fw_domain;
1482         unsigned int tmp;
1483
1484         seq_printf(m, "user.bypass_count = %u\n",
1485                    i915->uncore.user_forcewake.count);
1486
1487         for_each_fw_domain(fw_domain, i915, tmp)
1488                 seq_printf(m, "%s.wake_count = %u\n",
1489                            intel_uncore_forcewake_domain_to_str(fw_domain->id),
1490                            READ_ONCE(fw_domain->wake_count));
1491
1492         return 0;
1493 }
1494
/*
 * print_rc6_res - print one RC6 residency counter
 * @m: seq_file to print into
 * @title: human-readable label for the counter
 * @reg: residency counter register
 *
 * Prints the raw register value and its conversion to microseconds.
 * NOTE(review): the register is read twice (once raw, once inside
 * intel_rc6_residency_us) and the argument evaluation order is
 * unspecified, so the two printed values can come from slightly
 * different sample points of a running counter.
 */
static void print_rc6_res(struct seq_file *m,
			  const char *title,
			  const i915_reg_t reg)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	seq_printf(m, "%s %u (%llu us)\n",
		   title, I915_READ(reg),
		   intel_rc6_residency_us(dev_priv, reg));
}
1505
/*
 * vlv_drpc_info - dump VLV/CHV render C-state (DRPC) status
 *
 * Reads the GT power-well status and RC control registers, prints a
 * decoded summary plus the RC6 residency counters, then appends the
 * forcewake domain counts.  The caller (i915_drpc_info) holds a
 * runtime PM reference.
 */
static int vlv_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 rcctl1, pw_status;

	pw_status = I915_READ(VLV_GTLC_PW_STATUS);
	rcctl1 = I915_READ(GEN6_RC_CONTROL);

	/* RC6 counts as enabled if either the timeout mode or the
	 * EI evaluation mode bit is set. */
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
					GEN6_RC_CTL_EI_MODE(1))));
	seq_printf(m, "Render Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
	seq_printf(m, "Media Power Well: %s\n",
		   (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");

	print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
	print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);

	return i915_forcewake_domains(m, NULL);
}
1527
/*
 * gen6_drpc_info - dump gen6+ render C-state (DRPC) status
 *
 * Samples the GT core status, RC control and (gen9+) power-gating
 * registers, plus the RC6 VID values via pcode on gen6/7, and prints a
 * decoded summary followed by the RC6 residency counters and forcewake
 * domain counts.  The caller (i915_drpc_info) holds a runtime PM
 * reference.
 */
static int gen6_drpc_info(struct seq_file *m)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 gt_core_status, rcctl1, rc6vids = 0;
	u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;

	/* Raw (forcewake-bypassing) read; trace it manually since
	 * I915_READ_FW does not. */
	gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
	trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);

	rcctl1 = I915_READ(GEN6_RC_CONTROL);
	if (INTEL_GEN(dev_priv) >= 9) {
		gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
		gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
	}

	/* RC6 VIDs are only exposed via pcode on gen6/7. */
	if (INTEL_GEN(dev_priv) <= 7) {
		mutex_lock(&dev_priv->pcu_lock);
		sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
				       &rc6vids);
		mutex_unlock(&dev_priv->pcu_lock);
	}

	seq_printf(m, "RC1e Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
	seq_printf(m, "RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
		seq_printf(m, "Media Well Gating Enabled: %s\n",
			yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
	}
	seq_printf(m, "Deep RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
	seq_printf(m, "Deepest RC6 Enabled: %s\n",
		   yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
	seq_puts(m, "Current RC state: ");
	/* Decode the RC state field sampled above. */
	switch (gt_core_status & GEN6_RCn_MASK) {
	case GEN6_RC0:
		/* RC0 with the core powered down still prints as such. */
		if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
			seq_puts(m, "Core Power Down\n");
		else
			seq_puts(m, "on\n");
		break;
	case GEN6_RC3:
		seq_puts(m, "RC3\n");
		break;
	case GEN6_RC6:
		seq_puts(m, "RC6\n");
		break;
	case GEN6_RC7:
		seq_puts(m, "RC7\n");
		break;
	default:
		seq_puts(m, "Unknown\n");
		break;
	}

	seq_printf(m, "Core Power Down: %s\n",
		   yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
	if (INTEL_GEN(dev_priv) >= 9) {
		seq_printf(m, "Render Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
		seq_printf(m, "Media Power Well: %s\n",
			(gen9_powergate_status &
			 GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
	}

	/* Not exactly sure what this is */
	print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
		      GEN6_GT_GFX_RC6_LOCKED);
	print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
	print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
	print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);

	/* Decode the three 8-bit VID fields read via pcode above. */
	if (INTEL_GEN(dev_priv) <= 7) {
		seq_printf(m, "RC6   voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
		seq_printf(m, "RC6+  voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
		seq_printf(m, "RC6++ voltage: %dmV\n",
			   GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
	}

	return i915_forcewake_domains(m, NULL);
}
1615
1616 static int i915_drpc_info(struct seq_file *m, void *unused)
1617 {
1618         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1619         int err;
1620
1621         intel_runtime_pm_get(dev_priv);
1622
1623         if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1624                 err = vlv_drpc_info(m);
1625         else if (INTEL_GEN(dev_priv) >= 6)
1626                 err = gen6_drpc_info(m);
1627         else
1628                 err = ironlake_drpc_info(m);
1629
1630         intel_runtime_pm_put(dev_priv);
1631
1632         return err;
1633 }
1634
1635 static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1636 {
1637         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1638
1639         seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1640                    dev_priv->fb_tracking.busy_bits);
1641
1642         seq_printf(m, "FB tracking flip bits: 0x%08x\n",
1643                    dev_priv->fb_tracking.flip_bits);
1644
1645         return 0;
1646 }
1647
1648 static int i915_fbc_status(struct seq_file *m, void *unused)
1649 {
1650         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1651         struct intel_fbc *fbc = &dev_priv->fbc;
1652
1653         if (!HAS_FBC(dev_priv))
1654                 return -ENODEV;
1655
1656         intel_runtime_pm_get(dev_priv);
1657         mutex_lock(&fbc->lock);
1658
1659         if (intel_fbc_is_active(dev_priv))
1660                 seq_puts(m, "FBC enabled\n");
1661         else
1662                 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
1663
1664         if (intel_fbc_is_active(dev_priv)) {
1665                 u32 mask;
1666
1667                 if (INTEL_GEN(dev_priv) >= 8)
1668                         mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
1669                 else if (INTEL_GEN(dev_priv) >= 7)
1670                         mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
1671                 else if (INTEL_GEN(dev_priv) >= 5)
1672                         mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
1673                 else if (IS_G4X(dev_priv))
1674                         mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
1675                 else
1676                         mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
1677                                                         FBC_STAT_COMPRESSED);
1678
1679                 seq_printf(m, "Compressing: %s\n", yesno(mask));
1680         }
1681
1682         mutex_unlock(&fbc->lock);
1683         intel_runtime_pm_put(dev_priv);
1684
1685         return 0;
1686 }
1687
1688 static int i915_fbc_false_color_get(void *data, u64 *val)
1689 {
1690         struct drm_i915_private *dev_priv = data;
1691
1692         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1693                 return -ENODEV;
1694
1695         *val = dev_priv->fbc.false_color;
1696
1697         return 0;
1698 }
1699
1700 static int i915_fbc_false_color_set(void *data, u64 val)
1701 {
1702         struct drm_i915_private *dev_priv = data;
1703         u32 reg;
1704
1705         if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
1706                 return -ENODEV;
1707
1708         mutex_lock(&dev_priv->fbc.lock);
1709
1710         reg = I915_READ(ILK_DPFC_CONTROL);
1711         dev_priv->fbc.false_color = val;
1712
1713         I915_WRITE(ILK_DPFC_CONTROL, val ?
1714                    (reg | FBC_CTL_FALSE_COLOR) :
1715                    (reg & ~FBC_CTL_FALSE_COLOR));
1716
1717         mutex_unlock(&dev_priv->fbc.lock);
1718         return 0;
1719 }
1720
/* debugfs file ops tying together the false-colour get/set handlers;
 * the value is parsed and printed as "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
			i915_fbc_false_color_get, i915_fbc_false_color_set,
			"%llu\n");
1724
1725 static int i915_ips_status(struct seq_file *m, void *unused)
1726 {
1727         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1728
1729         if (!HAS_IPS(dev_priv))
1730                 return -ENODEV;
1731
1732         intel_runtime_pm_get(dev_priv);
1733
1734         seq_printf(m, "Enabled by kernel parameter: %s\n",
1735                    yesno(i915_modparams.enable_ips));
1736
1737         if (INTEL_GEN(dev_priv) >= 8) {
1738                 seq_puts(m, "Currently: unknown\n");
1739         } else {
1740                 if (I915_READ(IPS_CTL) & IPS_ENABLE)
1741                         seq_puts(m, "Currently: enabled\n");
1742                 else
1743                         seq_puts(m, "Currently: disabled\n");
1744         }
1745
1746         intel_runtime_pm_put(dev_priv);
1747
1748         return 0;
1749 }
1750
/*
 * i915_sr_status - debugfs show: self-refresh enable state
 *
 * Checks the platform-specific self-refresh enable bit and prints
 * whether self-refresh is on.  Holds runtime PM and the INIT power
 * domain across the register read.
 */
static int i915_sr_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	bool sr_enabled = false;

	intel_runtime_pm_get(dev_priv);
	intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

	/* Gen9+ has no single global SR bit; deliberately report
	 * "disabled" (empty statement keeps the ladder ordering). */
	if (INTEL_GEN(dev_priv) >= 9)
		/* no global SR status; inspect per-plane WM */;
	else if (HAS_PCH_SPLIT(dev_priv))
		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
	else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev_priv))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev_priv))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
	intel_runtime_pm_put(dev_priv);

	seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));

	return 0;
}
1780
1781 static int i915_emon_status(struct seq_file *m, void *unused)
1782 {
1783         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1784         struct drm_device *dev = &dev_priv->drm;
1785         unsigned long temp, chipset, gfx;
1786         int ret;
1787
1788         if (!IS_GEN5(dev_priv))
1789                 return -ENODEV;
1790
1791         ret = mutex_lock_interruptible(&dev->struct_mutex);
1792         if (ret)
1793                 return ret;
1794
1795         temp = i915_mch_val(dev_priv);
1796         chipset = i915_chipset_val(dev_priv);
1797         gfx = i915_gfx_val(dev_priv);
1798         mutex_unlock(&dev->struct_mutex);
1799
1800         seq_printf(m, "GMCH temp: %ld\n", temp);
1801         seq_printf(m, "Chipset power: %ld\n", chipset);
1802         seq_printf(m, "GFX power: %ld\n", gfx);
1803         seq_printf(m, "Total power: %ld\n", chipset + gfx);
1804
1805         return 0;
1806 }
1807
/*
 * i915_ring_freq_table - debugfs show: GPU/CPU/ring frequency mapping
 *
 * Walks the RPS frequency range and, for each GPU frequency step, asks
 * pcode (GEN6_PCODE_READ_MIN_FREQ_TABLE) for the matching effective
 * CPU and ring frequencies.  Only meaningful on LLC platforms.
 *
 * Returns 0, -ENODEV without LLC, or the error from an interrupted
 * pcu_lock acquisition.
 */
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	unsigned int max_gpu_freq, min_gpu_freq;
	int gpu_freq, ia_freq;
	int ret;

	if (!HAS_LLC(dev_priv))
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
	if (ret)
		goto out;

	min_gpu_freq = rps->min_freq;
	max_gpu_freq = rps->max_freq;
	if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
		/* Convert GT frequency to 50 HZ units */
		min_gpu_freq /= GEN9_FREQ_SCALER;
		max_gpu_freq /= GEN9_FREQ_SCALER;
	}

	seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");

	for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) {
		/* ia_freq is in/out: pcode takes the GPU step and
		 * returns packed CPU (bits 7:0) and ring (bits 15:8)
		 * frequencies in 100 MHz units. */
		ia_freq = gpu_freq;
		sandybridge_pcode_read(dev_priv,
				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
				       &ia_freq);
		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
			   intel_gpu_freq(dev_priv, (gpu_freq *
						     (IS_GEN9_BC(dev_priv) ||
						      INTEL_GEN(dev_priv) >= 10 ?
						      GEN9_FREQ_SCALER : 1))),
			   ((ia_freq >> 0) & 0xff) * 100,
			   ((ia_freq >> 8) & 0xff) * 100);
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
1855
1856 static int i915_opregion(struct seq_file *m, void *unused)
1857 {
1858         struct drm_i915_private *dev_priv = node_to_i915(m->private);
1859         struct drm_device *dev = &dev_priv->drm;
1860         struct intel_opregion *opregion = &dev_priv->opregion;
1861         int ret;
1862
1863         ret = mutex_lock_interruptible(&dev->struct_mutex);
1864         if (ret)
1865                 goto out;
1866
1867         if (opregion->header)
1868                 seq_write(m, opregion->header, OPREGION_SIZE);
1869
1870         mutex_unlock(&dev->struct_mutex);
1871
1872 out:
1873         return 0;
1874 }
1875
1876 static int i915_vbt(struct seq_file *m, void *unused)
1877 {
1878         struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
1879
1880         if (opregion->vbt)
1881                 seq_write(m, opregion->vbt, opregion->vbt_size);
1882
1883         return 0;
1884 }
1885
/*
 * i915_gem_framebuffer_info - debugfs show: framebuffers and their objects
 *
 * Describes the fbdev framebuffer (when fbdev emulation is built in)
 * and every user framebuffer, including the backing GEM object.
 * Takes struct_mutex for the object descriptions and fb_lock for the
 * framebuffer list walk.
 *
 * Returns 0, or the error from an interrupted struct_mutex wait.
 */
static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_framebuffer *fbdev_fb = NULL;
	struct drm_framebuffer *drm_fb;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

#ifdef CONFIG_DRM_FBDEV_EMULATION
	if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
		fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);

		seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fbdev_fb->base.width,
			   fbdev_fb->base.height,
			   fbdev_fb->base.format->depth,
			   fbdev_fb->base.format->cpp[0] * 8,
			   fbdev_fb->base.modifier,
			   drm_framebuffer_read_refcount(&fbdev_fb->base));
		describe_obj(m, intel_fb_obj(&fbdev_fb->base));
		seq_putc(m, '\n');
	}
#endif

	mutex_lock(&dev->mode_config.fb_lock);
	drm_for_each_fb(drm_fb, dev) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		/* The fbdev framebuffer was already printed above. */
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
			   fb->base.width,
			   fb->base.height,
			   fb->base.format->depth,
			   fb->base.format->cpp[0] * 8,
			   fb->base.modifier,
			   drm_framebuffer_read_refcount(&fb->base));
		describe_obj(m, intel_fb_obj(&fb->base));
		seq_putc(m, '\n');
	}
	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1935
1936 static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
1937 {
1938         seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
1939                    ring->space, ring->head, ring->tail, ring->emit);
1940 }
1941
/*
 * i915_context_status - debugfs show: all GEM contexts
 *
 * Walks the context list under struct_mutex and prints, per context:
 * the HW id and pin count (when assigned), the owning process (or
 * "(deleted)"/"(kernel)"), the remap-slice flag, and per-engine state
 * object and ring details.
 *
 * Returns 0, or the error from an interrupted struct_mutex wait.
 */
static int i915_context_status(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	enum intel_engine_id id;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
		seq_puts(m, "HW context ");
		/* hw_id_link non-empty means a HW id is assigned. */
		if (!list_empty(&ctx->hw_id_link))
			seq_printf(m, "%x [pin %u]", ctx->hw_id,
				   atomic_read(&ctx->hw_id_pin_count));
		if (ctx->pid) {
			struct task_struct *task;

			task = get_pid_task(ctx->pid, PIDTYPE_PID);
			if (task) {
				seq_printf(m, "(%s [%d]) ",
					   task->comm, task->pid);
				put_task_struct(task);
			}
		} else if (IS_ERR(ctx->file_priv)) {
			/* file_priv holds an ERR_PTR once the owner closed. */
			seq_puts(m, "(deleted) ");
		} else {
			seq_puts(m, "(kernel) ");
		}

		/* 'R'/'r': whether L3 slice remapping is pending. */
		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
		seq_putc(m, '\n');

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *ce =
				to_intel_context(ctx, engine);

			seq_printf(m, "%s: ", engine->name);
			if (ce->state)
				describe_obj(m, ce->state->obj);
			if (ce->ring)
				describe_ctx_ring(m, ce->ring);
			seq_putc(m, '\n');
		}

		seq_putc(m, '\n');
	}

	mutex_unlock(&dev->struct_mutex);

	return 0;
}
1997
1998 static const char *swizzle_string(unsigned swizzle)
1999 {
2000         switch (swizzle) {
2001         case I915_BIT_6_SWIZZLE_NONE:
2002                 return "none";
2003         case I915_BIT_6_SWIZZLE_9:
2004                 return "bit9";
2005         case I915_BIT_6_SWIZZLE_9_10:
2006                 return "bit9/bit10";
2007         case I915_BIT_6_SWIZZLE_9_11:
2008                 return "bit9/bit11";
2009         case I915_BIT_6_SWIZZLE_9_10_11:
2010                 return "bit9/bit10/bit11";
2011         case I915_BIT_6_SWIZZLE_9_17:
2012                 return "bit9/bit17";
2013         case I915_BIT_6_SWIZZLE_9_10_17:
2014                 return "bit9/bit10/bit17";
2015         case I915_BIT_6_SWIZZLE_UNKNOWN:
2016                 return "unknown";
2017         }
2018
2019         return "bug";
2020 }
2021
/*
 * i915_swizzle_info - debugfs show: bit-6 swizzle configuration
 *
 * Prints the detected X/Y tiling swizzle modes and the gen-specific
 * memory/arbiter registers they were derived from, plus whether the
 * L-shaped-memory quirk is active.  Holds a runtime PM reference for
 * the register reads.
 */
static int i915_swizzle_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_x));
	seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
		   swizzle_string(dev_priv->mm.bit_6_swizzle_y));

	/* Gen3/4 expose DRAM channel config via DCC/DRB registers;
	 * gen6+ via the MAD_DIMM and arbiter registers. */
	if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
		seq_printf(m, "DDC = 0x%08x\n",
			   I915_READ(DCC));
		seq_printf(m, "DDC2 = 0x%08x\n",
			   I915_READ(DCC2));
		seq_printf(m, "C0DRB3 = 0x%04x\n",
			   I915_READ16(C0DRB3));
		seq_printf(m, "C1DRB3 = 0x%04x\n",
			   I915_READ16(C1DRB3));
	} else if (INTEL_GEN(dev_priv) >= 6) {
		seq_printf(m, "MAD_DIMM_C0 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C0));
		seq_printf(m, "MAD_DIMM_C1 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C1));
		seq_printf(m, "MAD_DIMM_C2 = 0x%08x\n",
			   I915_READ(MAD_DIMM_C2));
		seq_printf(m, "TILECTL = 0x%08x\n",
			   I915_READ(TILECTL));
		if (INTEL_GEN(dev_priv) >= 8)
			seq_printf(m, "GAMTARBMODE = 0x%08x\n",
				   I915_READ(GAMTARBMODE));
		else
			seq_printf(m, "ARB_MODE = 0x%08x\n",
				   I915_READ(ARB_MODE));
		seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
			   I915_READ(DISP_ARB_CTL));
	}

	if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		seq_puts(m, "L-shaped memory detected\n");

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2068
/*
 * per_file_ctx - idr_for_each callback: dump one context's PPGTT
 * @id: idr handle (unused; ctx->user_handle is printed instead)
 * @ptr: the i915_gem_context
 * @data: the seq_file to print into
 *
 * Prints a header identifying the context and then asks its PPGTT to
 * dump itself.  Always returns 0 so the idr walk continues.
 */
static int per_file_ctx(int id, void *ptr, void *data)
{
	struct i915_gem_context *ctx = ptr;
	struct seq_file *m = data;
	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;

	if (!ppgtt) {
		seq_printf(m, "  no ppgtt for context %d\n",
			   ctx->user_handle);
		return 0;
	}

	if (i915_gem_context_is_default(ctx))
		seq_puts(m, "  default context:\n");
	else
		seq_printf(m, "  context %d:\n", ctx->user_handle);
	ppgtt->debug_dump(ppgtt, m);

	return 0;
}
2089
2090 static void gen8_ppgtt_info(struct seq_file *m,
2091                             struct drm_i915_private *dev_priv)
2092 {
2093         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2094         struct intel_engine_cs *engine;
2095         enum intel_engine_id id;
2096         int i;
2097
2098         if (!ppgtt)
2099                 return;
2100
2101         for_each_engine(engine, dev_priv, id) {
2102                 seq_printf(m, "%s\n", engine->name);
2103                 for (i = 0; i < 4; i++) {
2104                         u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
2105                         pdp <<= 32;
2106                         pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
2107                         seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
2108                 }
2109         }
2110 }
2111
/*
 * gen6_ppgtt_info - dump PPGTT state for gen6/7
 *
 * Prints the global GFX_MODE (gen6) or per-ring GFX_MODE (gen7), the
 * per-engine page-directory base/DCLV registers, the aliasing PPGTT
 * contents when present, and the ECOCHK register.
 */
static void gen6_ppgtt_info(struct seq_file *m,
			    struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (IS_GEN6(dev_priv))
		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));

	for_each_engine(engine, dev_priv, id) {
		seq_printf(m, "%s\n", engine->name);
		/* On gen7 GFX_MODE moved to a per-ring register. */
		if (IS_GEN7(dev_priv))
			seq_printf(m, "GFX_MODE: 0x%08x\n",
				   I915_READ(RING_MODE_GEN7(engine)));
		seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		seq_puts(m, "aliasing PPGTT:\n");
		seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);

		ppgtt->debug_dump(ppgtt, m);
	}

	seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
}
2144
/*
 * i915_ppgtt_info - debugfs show: PPGTT state, global and per-client
 *
 * Dumps the gen-specific global PPGTT registers, then walks every open
 * DRM file and dumps each of its contexts' PPGTTs.  Lock order:
 * filelist_mutex outer, struct_mutex inner; runtime PM held for the
 * register reads.
 *
 * Returns 0, the error from an interrupted struct_mutex wait, or
 * -ESRCH if a client's task has exited mid-walk.
 */
static int i915_ppgtt_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_file *file;
	int ret;

	mutex_lock(&dev->filelist_mutex);
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out_unlock;

	intel_runtime_pm_get(dev_priv);

	if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_info(m, dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_ppgtt_info(m, dev_priv);

	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		task = get_pid_task(file->pid, PIDTYPE_PID);
		if (!task) {
			ret = -ESRCH;
			goto out_rpm;
		}
		seq_printf(m, "\nproc: %s\n", task->comm);
		put_task_struct(task);
		/* per_file_ctx takes the seq_file back via its void*
		 * data argument; the double cast just silences the
		 * pointer-conversion warning. */
		idr_for_each(&file_priv->context_idr, per_file_ctx,
			     (void *)(unsigned long)m);
	}

out_rpm:
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&dev->struct_mutex);
out_unlock:
	mutex_unlock(&dev->filelist_mutex);
	return ret;
}
2186
2187 static int count_irq_waiters(struct drm_i915_private *i915)
2188 {
2189         struct intel_engine_cs *engine;
2190         enum intel_engine_id id;
2191         int count = 0;
2192
2193         for_each_engine(engine, i915, id)
2194                 count += intel_engine_has_waiter(engine);
2195
2196         return count;
2197 }
2198
2199 static const char *rps_power_to_str(unsigned int power)
2200 {
2201         static const char * const strings[] = {
2202                 [LOW_POWER] = "low power",
2203                 [BETWEEN] = "mixed",
2204                 [HIGH_POWER] = "high power",
2205         };
2206
2207         if (power >= ARRAY_SIZE(strings) || !strings[power])
2208                 return "unknown";
2209
2210         return strings[power];
2211 }
2212
/*
 * i915_rps_boost_info - debugfs dump of RPS (render P-state) boost state.
 *
 * Prints requested vs actual GPU frequency, the configured frequency
 * limits, per-client boost counts and, while RPS is active, the
 * autotuning up/down exposure counters.
 */
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	u32 act_freq = rps->cur_freq;
	struct drm_file *file;

	/*
	 * Only read the actual frequency from hardware if the device is
	 * already awake; otherwise report the last requested frequency.
	 */
	if (intel_runtime_pm_get_if_in_use(dev_priv)) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			/* VLV/CHV report the actual frequency via the punit */
			mutex_lock(&dev_priv->pcu_lock);
			act_freq = vlv_punit_read(dev_priv,
						  PUNIT_REG_GPU_FREQ_STS);
			act_freq = (act_freq >> 8) & 0xff;
			mutex_unlock(&dev_priv->pcu_lock);
		} else {
			act_freq = intel_get_cagf(dev_priv,
						  I915_READ(GEN6_RPSTAT1));
		}
		intel_runtime_pm_put(dev_priv);
	}

	seq_printf(m, "RPS enabled? %d\n", rps->enabled);
	seq_printf(m, "GPU busy? %s [%d requests]\n",
		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
	seq_printf(m, "Boosts outstanding? %d\n",
		   atomic_read(&rps->num_waiters));
	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
	seq_printf(m, "Frequency requested %d, actual %d\n",
		   intel_gpu_freq(dev_priv, rps->cur_freq),
		   intel_gpu_freq(dev_priv, act_freq));
	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
		   intel_gpu_freq(dev_priv, rps->min_freq),
		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
		   intel_gpu_freq(dev_priv, rps->max_freq));
	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
		   intel_gpu_freq(dev_priv, rps->idle_freq),
		   intel_gpu_freq(dev_priv, rps->efficient_freq),
		   intel_gpu_freq(dev_priv, rps->boost_freq));

	/* Per-open-file boost counts, walked under the filelist lock */
	mutex_lock(&dev->filelist_mutex);
	list_for_each_entry_reverse(file, &dev->filelist, lhead) {
		struct drm_i915_file_private *file_priv = file->driver_priv;
		struct task_struct *task;

		/* RCU protects the task lookup from the file's pid */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "%s [%d]: %d boosts\n",
			   task ? task->comm : "<unknown>",
			   task ? task->pid : -1,
			   atomic_read(&file_priv->rps_client.boosts));
		rcu_read_unlock();
	}
	seq_printf(m, "Kernel (anonymous) boosts: %d\n",
		   atomic_read(&rps->boosts));
	mutex_unlock(&dev->filelist_mutex);

	if (INTEL_GEN(dev_priv) >= 6 &&
	    rps->enabled &&
	    dev_priv->gt.active_requests) {
		u32 rpup, rpupei;
		u32 rpdown, rpdownei;

		/* Raw reads of the autotuning EI counters under forcewake */
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
		rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
		rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
		rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);

		seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
			   rps_power_to_str(rps->power.mode));
		seq_printf(m, "  Avg. up: %d%% [above threshold? %d%%]\n",
			   rpup && rpupei ? 100 * rpup / rpupei : 0,
			   rps->power.up_threshold);
		seq_printf(m, "  Avg. down: %d%% [below threshold? %d%%]\n",
			   rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
			   rps->power.down_threshold);
	} else {
		seq_puts(m, "\nRPS Autotuning inactive\n");
	}

	return 0;
}
2299
2300 static int i915_llc(struct seq_file *m, void *data)
2301 {
2302         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2303         const bool edram = INTEL_GEN(dev_priv) > 8;
2304
2305         seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
2306         seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
2307                    intel_uncore_edram_size(dev_priv)/1024/1024);
2308
2309         return 0;
2310 }
2311
2312 static int i915_huc_load_status_info(struct seq_file *m, void *data)
2313 {
2314         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2315         struct drm_printer p;
2316
2317         if (!HAS_HUC(dev_priv))
2318                 return -ENODEV;
2319
2320         p = drm_seq_file_printer(m);
2321         intel_uc_fw_dump(&dev_priv->huc.fw, &p);
2322
2323         intel_runtime_pm_get(dev_priv);
2324         seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
2325         intel_runtime_pm_put(dev_priv);
2326
2327         return 0;
2328 }
2329
2330 static int i915_guc_load_status_info(struct seq_file *m, void *data)
2331 {
2332         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2333         struct drm_printer p;
2334         u32 tmp, i;
2335
2336         if (!HAS_GUC(dev_priv))
2337                 return -ENODEV;
2338
2339         p = drm_seq_file_printer(m);
2340         intel_uc_fw_dump(&dev_priv->guc.fw, &p);
2341
2342         intel_runtime_pm_get(dev_priv);
2343
2344         tmp = I915_READ(GUC_STATUS);
2345
2346         seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
2347         seq_printf(m, "\tBootrom status = 0x%x\n",
2348                 (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
2349         seq_printf(m, "\tuKernel status = 0x%x\n",
2350                 (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
2351         seq_printf(m, "\tMIA Core status = 0x%x\n",
2352                 (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
2353         seq_puts(m, "\nScratch registers:\n");
2354         for (i = 0; i < 16; i++)
2355                 seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
2356
2357         intel_runtime_pm_put(dev_priv);
2358
2359         return 0;
2360 }
2361
2362 static const char *
2363 stringify_guc_log_type(enum guc_log_buffer_type type)
2364 {
2365         switch (type) {
2366         case GUC_ISR_LOG_BUFFER:
2367                 return "ISR";
2368         case GUC_DPC_LOG_BUFFER:
2369                 return "DPC";
2370         case GUC_CRASH_DUMP_LOG_BUFFER:
2371                 return "CRASH";
2372         default:
2373                 MISSING_CASE(type);
2374         }
2375
2376         return "";
2377 }
2378
2379 static void i915_guc_log_info(struct seq_file *m,
2380                               struct drm_i915_private *dev_priv)
2381 {
2382         struct intel_guc_log *log = &dev_priv->guc.log;
2383         enum guc_log_buffer_type type;
2384
2385         if (!intel_guc_log_relay_enabled(log)) {
2386                 seq_puts(m, "GuC log relay disabled\n");
2387                 return;
2388         }
2389
2390         seq_puts(m, "GuC logging stats:\n");
2391
2392         seq_printf(m, "\tRelay full count: %u\n",
2393                    log->relay.full_count);
2394
2395         for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
2396                 seq_printf(m, "\t%s:\tflush count %10u, overflow count %10u\n",
2397                            stringify_guc_log_type(type),
2398                            log->stats[type].flush,
2399                            log->stats[type].sampled_overflow);
2400         }
2401 }
2402
2403 static void i915_guc_client_info(struct seq_file *m,
2404                                  struct drm_i915_private *dev_priv,
2405                                  struct intel_guc_client *client)
2406 {
2407         struct intel_engine_cs *engine;
2408         enum intel_engine_id id;
2409         uint64_t tot = 0;
2410
2411         seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
2412                 client->priority, client->stage_id, client->proc_desc_offset);
2413         seq_printf(m, "\tDoorbell id %d, offset: 0x%lx\n",
2414                 client->doorbell_id, client->doorbell_offset);
2415
2416         for_each_engine(engine, dev_priv, id) {
2417                 u64 submissions = client->submissions[id];
2418                 tot += submissions;
2419                 seq_printf(m, "\tSubmissions: %llu %s\n",
2420                                 submissions, engine->name);
2421         }
2422         seq_printf(m, "\tTotal: %llu\n", tot);
2423 }
2424
2425 static int i915_guc_info(struct seq_file *m, void *data)
2426 {
2427         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2428         const struct intel_guc *guc = &dev_priv->guc;
2429
2430         if (!USES_GUC(dev_priv))
2431                 return -ENODEV;
2432
2433         i915_guc_log_info(m, dev_priv);
2434
2435         if (!USES_GUC_SUBMISSION(dev_priv))
2436                 return 0;
2437
2438         GEM_BUG_ON(!guc->execbuf_client);
2439
2440         seq_printf(m, "\nDoorbell map:\n");
2441         seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
2442         seq_printf(m, "Doorbell next cacheline: 0x%x\n", guc->db_cacheline);
2443
2444         seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
2445         i915_guc_client_info(m, dev_priv, guc->execbuf_client);
2446         if (guc->preempt_client) {
2447                 seq_printf(m, "\nGuC preempt client @ %p:\n",
2448                            guc->preempt_client);
2449                 i915_guc_client_info(m, dev_priv, guc->preempt_client);
2450         }
2451
2452         /* Add more as required ... */
2453
2454         return 0;
2455 }
2456
/*
 * i915_guc_stage_pool - dump every active GuC stage descriptor.
 *
 * Walks all GUC_MAX_STAGE_DESCRIPTORS entries of the stage descriptor
 * pool and, for each active entry, prints its doorbell/workqueue setup
 * plus the execlist context state for every engine in the execbuf
 * client's engine mask.
 */
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	const struct intel_guc *guc = &dev_priv->guc;
	struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
	struct intel_guc_client *client = guc->execbuf_client;
	unsigned int tmp;
	int index;

	if (!USES_GUC_SUBMISSION(dev_priv))
		return -ENODEV;

	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
		struct intel_engine_cs *engine;

		/* Skip descriptors that are not currently in use */
		if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
			continue;

		seq_printf(m, "GuC stage descriptor %u:\n", index);
		seq_printf(m, "\tIndex: %u\n", desc->stage_id);
		seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
		seq_printf(m, "\tPriority: %d\n", desc->priority);
		seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
		seq_printf(m, "\tEngines used: 0x%x\n",
			   desc->engines_used);
		seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
			   desc->db_trigger_phy,
			   desc->db_trigger_cpu,
			   desc->db_trigger_uk);
		seq_printf(m, "\tProcess descriptor: 0x%x\n",
			   desc->process_desc);
		seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
			   desc->wq_addr, desc->wq_size);
		seq_putc(m, '\n');

		/* Per-engine execlist context state for this stage */
		for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
			u32 guc_engine_id = engine->guc_id;
			struct guc_execlist_context *lrc =
						&desc->lrc[guc_engine_id];

			seq_printf(m, "\t%s LRC:\n", engine->name);
			seq_printf(m, "\t\tContext desc: 0x%x\n",
				   lrc->context_desc);
			seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
			seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
			seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
			seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
			seq_putc(m, '\n');
		}
	}

	return 0;
}
2510
2511 static int i915_guc_log_dump(struct seq_file *m, void *data)
2512 {
2513         struct drm_info_node *node = m->private;
2514         struct drm_i915_private *dev_priv = node_to_i915(node);
2515         bool dump_load_err = !!node->info_ent->data;
2516         struct drm_i915_gem_object *obj = NULL;
2517         u32 *log;
2518         int i = 0;
2519
2520         if (!HAS_GUC(dev_priv))
2521                 return -ENODEV;
2522
2523         if (dump_load_err)
2524                 obj = dev_priv->guc.load_err_log;
2525         else if (dev_priv->guc.log.vma)
2526                 obj = dev_priv->guc.log.vma->obj;
2527
2528         if (!obj)
2529                 return 0;
2530
2531         log = i915_gem_object_pin_map(obj, I915_MAP_WC);
2532         if (IS_ERR(log)) {
2533                 DRM_DEBUG("Failed to pin object\n");
2534                 seq_puts(m, "(log data unaccessible)\n");
2535                 return PTR_ERR(log);
2536         }
2537
2538         for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
2539                 seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
2540                            *(log + i), *(log + i + 1),
2541                            *(log + i + 2), *(log + i + 3));
2542
2543         seq_putc(m, '\n');
2544
2545         i915_gem_object_unpin_map(obj);
2546
2547         return 0;
2548 }
2549
2550 static int i915_guc_log_level_get(void *data, u64 *val)
2551 {
2552         struct drm_i915_private *dev_priv = data;
2553
2554         if (!USES_GUC(dev_priv))
2555                 return -ENODEV;
2556
2557         *val = intel_guc_log_get_level(&dev_priv->guc.log);
2558
2559         return 0;
2560 }
2561
2562 static int i915_guc_log_level_set(void *data, u64 val)
2563 {
2564         struct drm_i915_private *dev_priv = data;
2565
2566         if (!USES_GUC(dev_priv))
2567                 return -ENODEV;
2568
2569         return intel_guc_log_set_level(&dev_priv->guc.log, val);
2570 }
2571
/* u64 debugfs attribute wiring the GuC log level get/set helpers */
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
			i915_guc_log_level_get, i915_guc_log_level_set,
			"%lld\n");
2575
2576 static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
2577 {
2578         struct drm_i915_private *dev_priv = inode->i_private;
2579
2580         if (!USES_GUC(dev_priv))
2581                 return -ENODEV;
2582
2583         file->private_data = &dev_priv->guc.log;
2584
2585         return intel_guc_log_relay_open(&dev_priv->guc.log);
2586 }
2587
2588 static ssize_t
2589 i915_guc_log_relay_write(struct file *filp,
2590                          const char __user *ubuf,
2591                          size_t cnt,
2592                          loff_t *ppos)
2593 {
2594         struct intel_guc_log *log = filp->private_data;
2595
2596         intel_guc_log_relay_flush(log);
2597
2598         return cnt;
2599 }
2600
2601 static int i915_guc_log_relay_release(struct inode *inode, struct file *file)
2602 {
2603         struct drm_i915_private *dev_priv = inode->i_private;
2604
2605         intel_guc_log_relay_close(&dev_priv->guc.log);
2606
2607         return 0;
2608 }
2609
/* File operations for the GuC log relay debugfs entry */
static const struct file_operations i915_guc_log_relay_fops = {
	.owner = THIS_MODULE,
	.open = i915_guc_log_relay_open,
	.write = i915_guc_log_relay_write,
	.release = i915_guc_log_relay_release,
};
2616
2617 static int i915_psr_sink_status_show(struct seq_file *m, void *data)
2618 {
2619         u8 val;
2620         static const char * const sink_status[] = {
2621                 "inactive",
2622                 "transition to active, capture and display",
2623                 "active, display from RFB",
2624                 "active, capture and display on sink device timings",
2625                 "transition to inactive, capture and display, timing re-sync",
2626                 "reserved",
2627                 "reserved",
2628                 "sink internal error",
2629         };
2630         struct drm_connector *connector = m->private;
2631         struct drm_i915_private *dev_priv = to_i915(connector->dev);
2632         struct intel_dp *intel_dp =
2633                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
2634         int ret;
2635
2636         if (!CAN_PSR(dev_priv)) {
2637                 seq_puts(m, "PSR Unsupported\n");
2638                 return -ENODEV;
2639         }
2640
2641         if (connector->status != connector_status_connected)
2642                 return -ENODEV;
2643
2644         ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
2645
2646         if (ret == 1) {
2647                 const char *str = "unknown";
2648
2649                 val &= DP_PSR_SINK_STATE_MASK;
2650                 if (val < ARRAY_SIZE(sink_status))
2651                         str = sink_status[val];
2652                 seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
2653         } else {
2654                 return ret;
2655         }
2656
2657         return 0;
2658 }
2659 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
2660
2661 static void
2662 psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
2663 {
2664         u32 val, psr_status;
2665
2666         if (dev_priv->psr.psr2_enabled) {
2667                 static const char * const live_status[] = {
2668                         "IDLE",
2669                         "CAPTURE",
2670                         "CAPTURE_FS",
2671                         "SLEEP",
2672                         "BUFON_FW",
2673                         "ML_UP",
2674                         "SU_STANDBY",
2675                         "FAST_SLEEP",
2676                         "DEEP_SLEEP",
2677                         "BUF_ON",
2678                         "TG_ON"
2679                 };
2680                 psr_status = I915_READ(EDP_PSR2_STATUS);
2681                 val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
2682                         EDP_PSR2_STATUS_STATE_SHIFT;
2683                 if (val < ARRAY_SIZE(live_status)) {
2684                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2685                                    psr_status, live_status[val]);
2686                         return;
2687                 }
2688         } else {
2689                 static const char * const live_status[] = {
2690                         "IDLE",
2691                         "SRDONACK",
2692                         "SRDENT",
2693                         "BUFOFF",
2694                         "BUFON",
2695                         "AUXACK",
2696                         "SRDOFFACK",
2697                         "SRDENT_ON",
2698                 };
2699                 psr_status = I915_READ(EDP_PSR_STATUS);
2700                 val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
2701                         EDP_PSR_STATUS_STATE_SHIFT;
2702                 if (val < ARRAY_SIZE(live_status)) {
2703                         seq_printf(m, "Source PSR status: 0x%x [%s]\n",
2704                                    psr_status, live_status[val]);
2705                         return;
2706                 }
2707         }
2708
2709         seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
2710 }
2711
/*
 * i915_edp_psr_status - debugfs summary of eDP PSR state.
 *
 * Reports sink support, the negotiated mode (PSR1 vs PSR2), software
 * enable state and the hardware enable bit, the HSW/BDW performance
 * counter, the decoded source status and, when IRQ debug is enabled,
 * the last entry/exit timestamps.
 */
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	u32 psrperf = 0;
	bool enabled = false;
	bool sink_support;

	if (!HAS_PSR(dev_priv))
		return -ENODEV;

	sink_support = dev_priv->psr.sink_support;
	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
	if (!sink_support)
		return 0;

	/* The register reads below need the device awake */
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->psr.lock);
	seq_printf(m, "PSR mode: %s\n",
		   dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
		   dev_priv->psr.busy_frontbuffer_bits);

	/* The HW enable bit lives in different registers for PSR1/PSR2 */
	if (dev_priv->psr.psr2_enabled)
		enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
	else
		enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;

	seq_printf(m, "Main link in standby mode: %s\n",
		   yesno(dev_priv->psr.link_standby));

	seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));

	/*
	 * SKL+ Perf counter is reset to 0 everytime DC state is entered
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		psrperf = I915_READ(EDP_PSR_PERF_CNT) &
			EDP_PSR_PERF_CNT_MASK;

		seq_printf(m, "Performance_Counter: %u\n", psrperf);
	}

	psr_source_status(dev_priv, m);
	mutex_unlock(&dev_priv->psr.lock);

	if (READ_ONCE(dev_priv->psr.debug) & I915_PSR_DEBUG_IRQ) {
		seq_printf(m, "Last attempted entry at: %lld\n",
			   dev_priv->psr.last_entry_attempt);
		seq_printf(m, "Last exit at: %lld\n",
			   dev_priv->psr.last_exit);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;
}
2769
/*
 * i915_edp_psr_debug_set - debugfs setter for the PSR debug value.
 *
 * Applies @val via intel_psr_set_debugfs_mode() inside a modeset lock
 * acquisition context, retrying on -EDEADLK per the drm_modeset_lock
 * backoff protocol.  Holds a runtime-pm wakeref across the update.
 */
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!CAN_PSR(dev_priv))
		return -ENODEV;

	DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);

	intel_runtime_pm_get(dev_priv);

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

retry:
	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
	if (ret == -EDEADLK) {
		/* Drop contended locks and try the whole update again */
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	intel_runtime_pm_put(dev_priv);

	return ret;
}
2801
2802 static int
2803 i915_edp_psr_debug_get(void *data, u64 *val)
2804 {
2805         struct drm_i915_private *dev_priv = data;
2806
2807         if (!CAN_PSR(dev_priv))
2808                 return -ENODEV;
2809
2810         *val = READ_ONCE(dev_priv->psr.debug);
2811         return 0;
2812 }
2813
/* u64 debugfs attribute wiring the PSR debug get/set helpers */
DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
			i915_edp_psr_debug_get, i915_edp_psr_debug_set,
			"%llu\n");
2817
/*
 * i915_energy_uJ - report GPU package energy consumption in microjoules.
 *
 * Reads the RAPL energy-unit field from MSR_RAPL_POWER_UNIT and uses
 * it to scale the MCH_SECP_NRG_STTS counter.  Gen6+ only; returns
 * -ENODEV when the MSR read fails.
 */
static int i915_energy_uJ(struct seq_file *m, void *data)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	unsigned long long power;
	u32 units;

	if (INTEL_GEN(dev_priv) < 6)
		return -ENODEV;

	intel_runtime_pm_get(dev_priv);

	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
		intel_runtime_pm_put(dev_priv);
		return -ENODEV;
	}

	/* Energy Status Units field: MSR bits 12:8 (Intel SDM, RAPL) */
	units = (power & 0x1f00) >> 8;
	power = I915_READ(MCH_SECP_NRG_STTS);
	power = (1000000 * power) >> units; /* convert to uJ */

	intel_runtime_pm_put(dev_priv);

	/* NOTE(review): output carries no trailing newline — intentional? */
	seq_printf(m, "%llu", power);

	return 0;
}
2844
2845 static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2846 {
2847         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2848         struct pci_dev *pdev = dev_priv->drm.pdev;
2849
2850         if (!HAS_RUNTIME_PM(dev_priv))
2851                 seq_puts(m, "Runtime power management not supported\n");
2852
2853         seq_printf(m, "GPU idle: %s (epoch %u)\n",
2854                    yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
2855         seq_printf(m, "IRQs disabled: %s\n",
2856                    yesno(!intel_irqs_enabled(dev_priv)));
2857 #ifdef CONFIG_PM
2858         seq_printf(m, "Usage count: %d\n",
2859                    atomic_read(&dev_priv->drm.dev->power.usage_count));
2860 #else
2861         seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2862 #endif
2863         seq_printf(m, "PCI device power state: %s [%d]\n",
2864                    pci_power_name(pdev->current_state),
2865                    pdev->current_state);
2866
2867         return 0;
2868 }
2869
2870 static int i915_power_domain_info(struct seq_file *m, void *unused)
2871 {
2872         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2873         struct i915_power_domains *power_domains = &dev_priv->power_domains;
2874         int i;
2875
2876         mutex_lock(&power_domains->lock);
2877
2878         seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
2879         for (i = 0; i < power_domains->power_well_count; i++) {
2880                 struct i915_power_well *power_well;
2881                 enum intel_display_power_domain power_domain;
2882
2883                 power_well = &power_domains->power_wells[i];
2884                 seq_printf(m, "%-25s %d\n", power_well->desc->name,
2885                            power_well->count);
2886
2887                 for_each_power_domain(power_domain, power_well->desc->domains)
2888                         seq_printf(m, "  %-23s %d\n",
2889                                  intel_display_power_domain_str(power_domain),
2890                                  power_domains->domain_use_count[power_domain]);
2891         }
2892
2893         mutex_unlock(&power_domains->lock);
2894
2895         return 0;
2896 }
2897
/*
 * i915_dmc_info - report DMC/CSR firmware state.
 *
 * Prints load status, firmware path and version, DC-state transition
 * counters on the platforms that expose them, and a few raw CSR
 * registers.  Register reads are done under a runtime-pm wakeref.
 */
static int i915_dmc_info(struct seq_file *m, void *unused)
{
	struct drm_i915_private *dev_priv = node_to_i915(m->private);
	struct intel_csr *csr;

	if (!HAS_CSR(dev_priv))
		return -ENODEV;

	csr = &dev_priv->csr;

	intel_runtime_pm_get(dev_priv);

	seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
	seq_printf(m, "path: %s\n", csr->fw_path);

	/* Without a loaded payload only the raw registers are printed */
	if (!csr->dmc_payload)
		goto out;

	seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
		   CSR_VERSION_MINOR(csr->version));

	if (IS_BROXTON(dev_priv)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(BXT_CSR_DC3_DC5_COUNT));
	} else if (IS_GEN(dev_priv, 9, 11)) {
		seq_printf(m, "DC3 -> DC5 count: %d\n",
			   I915_READ(SKL_CSR_DC3_DC5_COUNT));
		seq_printf(m, "DC5 -> DC6 count: %d\n",
			   I915_READ(SKL_CSR_DC5_DC6_COUNT));
	}

out:
	seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
	seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
	seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));

	intel_runtime_pm_put(dev_priv);

	return 0;
}
2938
2939 static void intel_seq_print_mode(struct seq_file *m, int tabs,
2940                                  struct drm_display_mode *mode)
2941 {
2942         int i;
2943
2944         for (i = 0; i < tabs; i++)
2945                 seq_putc(m, '\t');
2946
2947         seq_printf(m, "id %d:\"%s\" freq %d clock %d hdisp %d hss %d hse %d htot %d vdisp %d vss %d vse %d vtot %d type 0x%x flags 0x%x\n",
2948                    mode->base.id, mode->name,
2949                    mode->vrefresh, mode->clock,
2950                    mode->hdisplay, mode->hsync_start,
2951                    mode->hsync_end, mode->htotal,
2952                    mode->vdisplay, mode->vsync_start,
2953                    mode->vsync_end, mode->vtotal,
2954                    mode->type, mode->flags);
2955 }
2956
2957 static void intel_encoder_info(struct seq_file *m,
2958                                struct intel_crtc *intel_crtc,
2959                                struct intel_encoder *intel_encoder)
2960 {
2961         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2962         struct drm_device *dev = &dev_priv->drm;
2963         struct drm_crtc *crtc = &intel_crtc->base;
2964         struct intel_connector *intel_connector;
2965         struct drm_encoder *encoder;
2966
2967         encoder = &intel_encoder->base;
2968         seq_printf(m, "\tencoder %d: type: %s, connectors:\n",
2969                    encoder->base.id, encoder->name);
2970         for_each_connector_on_encoder(dev, encoder, intel_connector) {
2971                 struct drm_connector *connector = &intel_connector->base;
2972                 seq_printf(m, "\t\tconnector %d: type: %s, status: %s",
2973                            connector->base.id,
2974                            connector->name,
2975                            drm_get_connector_status_name(connector->status));
2976                 if (connector->status == connector_status_connected) {
2977                         struct drm_display_mode *mode = &crtc->mode;
2978                         seq_printf(m, ", mode:\n");
2979                         intel_seq_print_mode(m, 2, mode);
2980                 } else {
2981                         seq_putc(m, '\n');
2982                 }
2983         }
2984 }
2985
2986 static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc)
2987 {
2988         struct drm_i915_private *dev_priv = node_to_i915(m->private);
2989         struct drm_device *dev = &dev_priv->drm;
2990         struct drm_crtc *crtc = &intel_crtc->base;
2991         struct intel_encoder *intel_encoder;
2992         struct drm_plane_state *plane_state = crtc->primary->state;
2993         struct drm_framebuffer *fb = plane_state->fb;
2994
2995         if (fb)
2996                 seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n",
2997                            fb->base.id, plane_state->src_x >> 16,
2998                            plane_state->src_y >> 16, fb->width, fb->height);
2999         else
3000                 seq_puts(m, "\tprimary plane disabled\n");
3001         for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3002                 intel_encoder_info(m, intel_crtc, intel_encoder);
3003 }
3004
3005 static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
3006 {
3007         struct drm_display_mode *mode = panel->fixed_mode;
3008
3009         seq_printf(m, "\tfixed mode:\n");
3010         intel_seq_print_mode(m, 2, mode);
3011 }
3012
3013 static void intel_dp_info(struct seq_file *m,
3014                           struct intel_connector *intel_connector)
3015 {
3016         struct intel_encoder *intel_encoder = intel_connector->encoder;
3017         struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
3018
3019         seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
3020         seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
3021         if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
3022                 intel_panel_info(m, &intel_connector->panel);
3023
3024         drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
3025                                 &intel_dp->aux);
3026 }
3027
3028 static void intel_dp_mst_info(struct seq_file *m,
3029                           struct intel_connector *intel_connector)
3030 {
3031         struct intel_encoder *intel_encoder = intel_connector->encoder;
3032         struct intel_dp_mst_encoder *intel_mst =
3033                 enc_to_mst(&intel_encoder->base);
3034         struct intel_digital_port *intel_dig_port = intel_mst->primary;
3035         struct intel_dp *intel_dp = &intel_dig_port->dp;
3036         bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
3037                                         intel_connector->port);
3038
3039         seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
3040 }
3041
3042 static void intel_hdmi_info(struct seq_file *m,
3043                             struct intel_connector *intel_connector)
3044 {
3045         struct intel_encoder *intel_encoder = intel_connector->encoder;
3046         struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&intel_encoder->base);
3047
3048         seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
3049 }
3050
/* LVDS connectors only carry a fixed panel mode; print just that. */
static void intel_lvds_info(struct seq_file *m,
			    struct intel_connector *intel_connector)
{
	intel_panel_info(m, &intel_connector->panel);
}
3056
3057 static void intel_connector_info(struct seq_file *m,
3058                                  struct drm_connector *connector)
3059 {
3060         struct intel_connector *intel_connector = to_intel_connector(connector);
3061         struct intel_encoder *intel_encoder = intel_connector->encoder;
3062         struct drm_display_mode *mode;
3063
3064         seq_printf(m, "connector %d: type %s, status: %s\n",
3065                    connector->base.id, connector->name,
3066                    drm_get_connector_status_name(connector->status));
3067         if (connector->status == connector_status_connected) {
3068                 seq_printf(m, "\tname: %s\n", connector->display_info.name);
3069                 seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
3070                            connector->display_info.width_mm,
3071                            connector->display_info.height_mm);
3072                 seq_printf(m, "\tsubpixel order: %s\n",
3073                            drm_get_subpixel_order_name(connector->display_info.subpixel_order));
3074                 seq_printf(m, "\tCEA rev: %d\n",
3075                            connector->display_info.cea_rev);
3076         }
3077
3078         if (!intel_encoder)
3079                 return;
3080
3081         switch (connector->connector_type) {
3082         case DRM_MODE_CONNECTOR_DisplayPort:
3083         case DRM_MODE_CONNECTOR_eDP:
3084                 if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
3085                         intel_dp_mst_info(m, intel_connector);
3086                 else
3087                         intel_dp_info(m, intel_connector);
3088                 break;
3089         case DRM_MODE_CONNECTOR_LVDS:
3090                 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
3091                         intel_lvds_info(m, intel_connector);
3092                 break;
3093         case DRM_MODE_CONNECTOR_HDMIA:
3094                 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3095                     intel_encoder->type == INTEL_OUTPUT_DDI)
3096                         intel_hdmi_info(m, intel_connector);
3097                 break;
3098         default:
3099                 break;
3100         }
3101
3102         seq_printf(m, "\tmodes:\n");
3103         list_for_each_entry(mode, &connector->modes, head)
3104                 intel_seq_print_mode(m, 2, mode);
3105 }
3106
3107 static const char *plane_type(enum drm_plane_type type)
3108 {
3109         switch (type) {
3110         case DRM_PLANE_TYPE_OVERLAY:
3111                 return "OVL";
3112         case DRM_PLANE_TYPE_PRIMARY:
3113                 return "PRI";
3114         case DRM_PLANE_TYPE_CURSOR:
3115                 return "CUR";
3116         /*
3117          * Deliberately omitting default: to generate compiler warnings
3118          * when a new drm_plane_type gets added.
3119          */
3120         }
3121
3122         return "unknown";
3123 }
3124
/*
 * Format a plane rotation bitmask as a human-readable string.
 *
 * NOTE(review): returns a pointer to a static buffer, so the result is
 * only valid until the next call and the function is not reentrant.
 * That appears tolerable for these single-threaded debugfs dumps, but
 * confirm before reusing it elsewhere.
 */
static const char *plane_rotation(unsigned int rotation)
{
	static char buf[48];
	/*
	 * According to doc only one DRM_MODE_ROTATE_ is allowed but this
	 * will print them all to visualize if the values are misused
	 */
	snprintf(buf, sizeof(buf),
		 "%s%s%s%s%s%s(0x%08x)",
		 (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
		 (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
		 (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
		 (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
		 (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
		 (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
		 rotation);

	return buf;
}
3144
3145 static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
3146 {
3147         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3148         struct drm_device *dev = &dev_priv->drm;
3149         struct intel_plane *intel_plane;
3150
3151         for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3152                 struct drm_plane_state *state;
3153                 struct drm_plane *plane = &intel_plane->base;
3154                 struct drm_format_name_buf format_name;
3155
3156                 if (!plane->state) {
3157                         seq_puts(m, "plane->state is NULL!\n");
3158                         continue;
3159                 }
3160
3161                 state = plane->state;
3162
3163                 if (state->fb) {
3164                         drm_get_format_name(state->fb->format->format,
3165                                             &format_name);
3166                 } else {
3167                         sprintf(format_name.str, "N/A");
3168                 }
3169
3170                 seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
3171                            plane->base.id,
3172                            plane_type(intel_plane->base.type),
3173                            state->crtc_x, state->crtc_y,
3174                            state->crtc_w, state->crtc_h,
3175                            (state->src_x >> 16),
3176                            ((state->src_x & 0xffff) * 15625) >> 10,
3177                            (state->src_y >> 16),
3178                            ((state->src_y & 0xffff) * 15625) >> 10,
3179                            (state->src_w >> 16),
3180                            ((state->src_w & 0xffff) * 15625) >> 10,
3181                            (state->src_h >> 16),
3182                            ((state->src_h & 0xffff) * 15625) >> 10,
3183                            format_name.str,
3184                            plane_rotation(state->rotation));
3185         }
3186 }
3187
/*
 * Print pipe scaler state for one CRTC: how many scalers the platform
 * provides, the scaler_users mask and assigned scaler_id, plus each
 * scaler's in-use flag and mode.
 */
static void intel_scaler_info(struct seq_file *m, struct intel_crtc *intel_crtc)
{
	struct intel_crtc_state *pipe_config;
	int num_scalers = intel_crtc->num_scalers;
	int i;

	pipe_config = to_intel_crtc_state(intel_crtc->base.state);

	/* Not all platforms have a scaler */
	if (num_scalers) {
		seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
			   num_scalers,
			   pipe_config->scaler_state.scaler_users,
			   pipe_config->scaler_state.scaler_id);

		for (i = 0; i < num_scalers; i++) {
			struct intel_scaler *sc =
					&pipe_config->scaler_state.scalers[i];

			seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
				   i, yesno(sc->in_use), sc->mode);
		}
		seq_puts(m, "\n");
	} else {
		seq_puts(m, "\tNo scalers available on this platform\n");
	}
}
3215
3216 static int i915_display_info(struct seq_file *m, void *unused)
3217 {
3218         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3219         struct drm_device *dev = &dev_priv->drm;
3220         struct intel_crtc *crtc;
3221         struct drm_connector *connector;
3222         struct drm_connector_list_iter conn_iter;
3223
3224         intel_runtime_pm_get(dev_priv);
3225         seq_printf(m, "CRTC info\n");
3226         seq_printf(m, "---------\n");
3227         for_each_intel_crtc(dev, crtc) {
3228                 struct intel_crtc_state *pipe_config;
3229
3230                 drm_modeset_lock(&crtc->base.mutex, NULL);
3231                 pipe_config = to_intel_crtc_state(crtc->base.state);
3232
3233                 seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
3234                            crtc->base.base.id, pipe_name(crtc->pipe),
3235                            yesno(pipe_config->base.active),
3236                            pipe_config->pipe_src_w, pipe_config->pipe_src_h,
3237                            yesno(pipe_config->dither), pipe_config->pipe_bpp);
3238
3239                 if (pipe_config->base.active) {
3240                         struct intel_plane *cursor =
3241                                 to_intel_plane(crtc->base.cursor);
3242
3243                         intel_crtc_info(m, crtc);
3244
3245                         seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
3246                                    yesno(cursor->base.state->visible),
3247                                    cursor->base.state->crtc_x,
3248                                    cursor->base.state->crtc_y,
3249                                    cursor->base.state->crtc_w,
3250                                    cursor->base.state->crtc_h,
3251                                    cursor->cursor.base);
3252                         intel_scaler_info(m, crtc);
3253                         intel_plane_info(m, crtc);
3254                 }
3255
3256                 seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
3257                            yesno(!crtc->cpu_fifo_underrun_disabled),
3258                            yesno(!crtc->pch_fifo_underrun_disabled));
3259                 drm_modeset_unlock(&crtc->base.mutex);
3260         }
3261
3262         seq_printf(m, "\n");
3263         seq_printf(m, "Connector info\n");
3264         seq_printf(m, "--------------\n");
3265         mutex_lock(&dev->mode_config.mutex);
3266         drm_connector_list_iter_begin(dev, &conn_iter);
3267         drm_for_each_connector_iter(connector, &conn_iter)
3268                 intel_connector_info(m, connector);
3269         drm_connector_list_iter_end(&conn_iter);
3270         mutex_unlock(&dev->mode_config.mutex);
3271
3272         intel_runtime_pm_put(dev_priv);
3273
3274         return 0;
3275 }
3276
3277 static int i915_engine_info(struct seq_file *m, void *unused)
3278 {
3279         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3280         struct intel_engine_cs *engine;
3281         enum intel_engine_id id;
3282         struct drm_printer p;
3283
3284         intel_runtime_pm_get(dev_priv);
3285
3286         seq_printf(m, "GT awake? %s (epoch %u)\n",
3287                    yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
3288         seq_printf(m, "Global active requests: %d\n",
3289                    dev_priv->gt.active_requests);
3290         seq_printf(m, "CS timestamp frequency: %u kHz\n",
3291                    dev_priv->info.cs_timestamp_frequency_khz);
3292
3293         p = drm_seq_file_printer(m);
3294         for_each_engine(engine, dev_priv, id)
3295                 intel_engine_dump(engine, &p, "%s\n", engine->name);
3296
3297         intel_runtime_pm_put(dev_priv);
3298
3299         return 0;
3300 }
3301
3302 static int i915_rcs_topology(struct seq_file *m, void *unused)
3303 {
3304         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3305         struct drm_printer p = drm_seq_file_printer(m);
3306
3307         intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
3308
3309         return 0;
3310 }
3311
3312 static int i915_shrinker_info(struct seq_file *m, void *unused)
3313 {
3314         struct drm_i915_private *i915 = node_to_i915(m->private);
3315
3316         seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks);
3317         seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch);
3318
3319         return 0;
3320 }
3321
3322 static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3323 {
3324         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3325         struct drm_device *dev = &dev_priv->drm;
3326         int i;
3327
3328         drm_modeset_lock_all(dev);
3329         for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3330                 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
3331
3332                 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
3333                            pll->info->id);
3334                 seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
3335                            pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
3336                 seq_printf(m, " tracked hardware state:\n");
3337                 seq_printf(m, " dpll:    0x%08x\n", pll->state.hw_state.dpll);
3338                 seq_printf(m, " dpll_md: 0x%08x\n",
3339                            pll->state.hw_state.dpll_md);
3340                 seq_printf(m, " fp0:     0x%08x\n", pll->state.hw_state.fp0);
3341                 seq_printf(m, " fp1:     0x%08x\n", pll->state.hw_state.fp1);
3342                 seq_printf(m, " wrpll:   0x%08x\n", pll->state.hw_state.wrpll);
3343                 seq_printf(m, " cfgcr0:  0x%08x\n", pll->state.hw_state.cfgcr0);
3344                 seq_printf(m, " cfgcr1:  0x%08x\n", pll->state.hw_state.cfgcr1);
3345                 seq_printf(m, " mg_refclkin_ctl:        0x%08x\n",
3346                            pll->state.hw_state.mg_refclkin_ctl);
3347                 seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
3348                            pll->state.hw_state.mg_clktop2_coreclkctl1);
3349                 seq_printf(m, " mg_clktop2_hsclkctl:    0x%08x\n",
3350                            pll->state.hw_state.mg_clktop2_hsclkctl);
3351                 seq_printf(m, " mg_pll_div0:  0x%08x\n",
3352                            pll->state.hw_state.mg_pll_div0);
3353                 seq_printf(m, " mg_pll_div1:  0x%08x\n",
3354                            pll->state.hw_state.mg_pll_div1);
3355                 seq_printf(m, " mg_pll_lf:    0x%08x\n",
3356                            pll->state.hw_state.mg_pll_lf);
3357                 seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
3358                            pll->state.hw_state.mg_pll_frac_lock);
3359                 seq_printf(m, " mg_pll_ssc:   0x%08x\n",
3360                            pll->state.hw_state.mg_pll_ssc);
3361                 seq_printf(m, " mg_pll_bias:  0x%08x\n",
3362                            pll->state.hw_state.mg_pll_bias);
3363                 seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
3364                            pll->state.hw_state.mg_pll_tdc_coldst_bias);
3365         }
3366         drm_modeset_unlock_all(dev);
3367
3368         return 0;
3369 }
3370
3371 static int i915_wa_registers(struct seq_file *m, void *unused)
3372 {
3373         struct i915_workarounds *wa = &node_to_i915(m->private)->workarounds;
3374         int i;
3375
3376         seq_printf(m, "Workarounds applied: %d\n", wa->count);
3377         for (i = 0; i < wa->count; ++i)
3378                 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
3379                            wa->reg[i].addr, wa->reg[i].value, wa->reg[i].mask);
3380
3381         return 0;
3382 }
3383
3384 static int i915_ipc_status_show(struct seq_file *m, void *data)
3385 {
3386         struct drm_i915_private *dev_priv = m->private;
3387
3388         seq_printf(m, "Isochronous Priority Control: %s\n",
3389                         yesno(dev_priv->ipc_enabled));
3390         return 0;
3391 }
3392
3393 static int i915_ipc_status_open(struct inode *inode, struct file *file)
3394 {
3395         struct drm_i915_private *dev_priv = inode->i_private;
3396
3397         if (!HAS_IPC(dev_priv))
3398                 return -ENODEV;
3399
3400         return single_open(file, i915_ipc_status_show, dev_priv);
3401 }
3402
3403 static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
3404                                      size_t len, loff_t *offp)
3405 {
3406         struct seq_file *m = file->private_data;
3407         struct drm_i915_private *dev_priv = m->private;
3408         int ret;
3409         bool enable;
3410
3411         ret = kstrtobool_from_user(ubuf, len, &enable);
3412         if (ret < 0)
3413                 return ret;
3414
3415         intel_runtime_pm_get(dev_priv);
3416         if (!dev_priv->ipc_enabled && enable)
3417                 DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
3418         dev_priv->wm.distrust_bios_wm = true;
3419         dev_priv->ipc_enabled = enable;
3420         intel_enable_ipc(dev_priv);
3421         intel_runtime_pm_put(dev_priv);
3422
3423         return len;
3424 }
3425
/* debugfs file ops: read shows the IPC state, write toggles it. */
static const struct file_operations i915_ipc_status_fops = {
	.owner = THIS_MODULE,
	.open = i915_ipc_status_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_ipc_status_write
};
3434
3435 static int i915_ddb_info(struct seq_file *m, void *unused)
3436 {
3437         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3438         struct drm_device *dev = &dev_priv->drm;
3439         struct skl_ddb_allocation *ddb;
3440         struct skl_ddb_entry *entry;
3441         enum pipe pipe;
3442         int plane;
3443
3444         if (INTEL_GEN(dev_priv) < 9)
3445                 return -ENODEV;
3446
3447         drm_modeset_lock_all(dev);
3448
3449         ddb = &dev_priv->wm.skl_hw.ddb;
3450
3451         seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
3452
3453         for_each_pipe(dev_priv, pipe) {
3454                 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
3455
3456                 for_each_universal_plane(dev_priv, pipe, plane) {
3457                         entry = &ddb->plane[pipe][plane];
3458                         seq_printf(m, "  Plane%-8d%8u%8u%8u\n", plane + 1,
3459                                    entry->start, entry->end,
3460                                    skl_ddb_entry_size(entry));
3461                 }
3462
3463                 entry = &ddb->plane[pipe][PLANE_CURSOR];
3464                 seq_printf(m, "  %-13s%8u%8u%8u\n", "Cursor", entry->start,
3465                            entry->end, skl_ddb_entry_size(entry));
3466         }
3467
3468         drm_modeset_unlock_all(dev);
3469
3470         return 0;
3471 }
3472
/*
 * Print DRRS (Display Refresh Rate Switching) status for one CRTC:
 * the connector(s) driven by it, the DRRS type advertised by VBT,
 * and — when the current CRTC state has DRRS — the live idleness-DRRS
 * state under drrs->mutex.
 */
static void drrs_status_per_crtc(struct seq_file *m,
				 struct drm_device *dev,
				 struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_drrs *drrs = &dev_priv->drrs;
	int vrefresh = 0;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	/* Name every connector currently bound to this CRTC. */
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->crtc != &intel_crtc->base)
			continue;

		seq_printf(m, "%s:\n", connector->name);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Static");
	else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
		seq_puts(m, "\tVBT: DRRS_type: Seamless");
	else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
		seq_puts(m, "\tVBT: DRRS_type: None");
	else
		seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");

	seq_puts(m, "\n\n");

	if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
		struct intel_panel *panel;

		/* drrs->mutex guards drrs->dp and refresh_rate_type. */
		mutex_lock(&drrs->mutex);
		/* DRRS Supported */
		seq_puts(m, "\tDRRS Supported: Yes\n");

		/* disable_drrs() will make drrs->dp NULL */
		if (!drrs->dp) {
			seq_puts(m, "Idleness DRRS: Disabled\n");
			if (dev_priv->psr.enabled)
				seq_puts(m,
				"\tAs PSR is enabled, DRRS is not enabled\n");
			mutex_unlock(&drrs->mutex);
			return;
		}

		panel = &drrs->dp->attached_connector->panel;
		seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
					drrs->busy_frontbuffer_bits);

		seq_puts(m, "\n\t\t");
		if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
			seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
			vrefresh = panel->fixed_mode->vrefresh;
		} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
			seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
			vrefresh = panel->downclock_mode->vrefresh;
		} else {
			/* Unknown state: report it and bail out early. */
			seq_printf(m, "DRRS_State: Unknown(%d)\n",
						drrs->refresh_rate_type);
			mutex_unlock(&drrs->mutex);
			return;
		}
		seq_printf(m, "\t\tVrefresh: %d", vrefresh);

		seq_puts(m, "\n\t\t");
		mutex_unlock(&drrs->mutex);
	} else {
		/* DRRS not supported. Print the VBT parameter*/
		seq_puts(m, "\tDRRS Supported : No");
	}
	seq_puts(m, "\n");
}
3547
3548 static int i915_drrs_status(struct seq_file *m, void *unused)
3549 {
3550         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3551         struct drm_device *dev = &dev_priv->drm;
3552         struct intel_crtc *intel_crtc;
3553         int active_crtc_cnt = 0;
3554
3555         drm_modeset_lock_all(dev);
3556         for_each_intel_crtc(dev, intel_crtc) {
3557                 if (intel_crtc->base.state->active) {
3558                         active_crtc_cnt++;
3559                         seq_printf(m, "\nCRTC %d:  ", active_crtc_cnt);
3560
3561                         drrs_status_per_crtc(m, dev, intel_crtc);
3562                 }
3563         }
3564         drm_modeset_unlock_all(dev);
3565
3566         if (!active_crtc_cnt)
3567                 seq_puts(m, "No active crtc found\n");
3568
3569         return 0;
3570 }
3571
3572 static int i915_dp_mst_info(struct seq_file *m, void *unused)
3573 {
3574         struct drm_i915_private *dev_priv = node_to_i915(m->private);
3575         struct drm_device *dev = &dev_priv->drm;
3576         struct intel_encoder *intel_encoder;
3577         struct intel_digital_port *intel_dig_port;
3578         struct drm_connector *connector;
3579         struct drm_connector_list_iter conn_iter;
3580
3581         drm_connector_list_iter_begin(dev, &conn_iter);
3582         drm_for_each_connector_iter(connector, &conn_iter) {
3583                 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3584                         continue;
3585
3586                 intel_encoder = intel_attached_encoder(connector);
3587                 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3588                         continue;
3589
3590                 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3591                 if (!intel_dig_port->dp.can_mst)
3592                         continue;
3593
3594                 seq_printf(m, "MST Source Port %c\n",
3595                            port_name(intel_dig_port->base.port));
3596                 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
3597         }
3598         drm_connector_list_iter_end(&conn_iter);
3599
3600         return 0;
3601 }
3602
/*
 * Write handler for the DP compliance "test_active" debugfs file.
 * Parses an integer from userspace and, for every connected SST DP
 * connector, arms (value 1) or disarms the compliance test flag.
 */
static ssize_t i915_displayport_test_active_write(struct file *file,
						  const char __user *ubuf,
						  size_t len, loff_t *offp)
{
	char *input_buffer;
	int status = 0;
	struct drm_device *dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	struct intel_dp *intel_dp;
	int val = 0;

	dev = ((struct seq_file *)file->private_data)->private;

	if (len == 0)
		return 0;

	/* Copy and NUL-terminate the user buffer; freed below. */
	input_buffer = memdup_user_nul(ubuf, len);
	if (IS_ERR(input_buffer))
		return PTR_ERR(input_buffer);

	DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct intel_encoder *encoder;

		if (connector->connector_type !=
		    DRM_MODE_CONNECTOR_DisplayPort)
			continue;

		/* MST "fake" connectors are skipped. */
		encoder = to_intel_encoder(connector->encoder);
		if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
			continue;

		if (encoder && connector->status == connector_status_connected) {
			intel_dp = enc_to_intel_dp(&encoder->base);
			/* On parse failure, stop the walk; cleanup follows. */
			status = kstrtoint(input_buffer, 10, &val);
			if (status < 0)
				break;
			DRM_DEBUG_DRIVER("Got %d for test active\n", val);
			/* To prevent erroneous activation of the compliance
			 * testing code, only accept an actual value of 1 here
			 */
			if (val == 1)
				intel_dp->compliance.test_active = 1;
			else
				intel_dp->compliance.test_active = 0;
		}
	}
	drm_connector_list_iter_end(&conn_iter);
	kfree(input_buffer);
	if (status < 0)
		return status;

	*offp += len;
	return len;
}
3661
3662 static int i915_displayport_test_active_show(struct seq_file *m, void *data)
3663 {
3664         struct drm_i915_private *dev_priv = m->private;
3665         struct drm_device *dev = &dev_priv->drm;
3666         struct drm_connector *connector;
3667         struct drm_connector_list_iter conn_iter;
3668         struct intel_dp *intel_dp;
3669
3670         drm_connector_list_iter_begin(dev, &conn_iter);
3671         drm_for_each_connector_iter(connector, &conn_iter) {
3672                 struct intel_encoder *encoder;
3673
3674                 if (connector->connector_type !=
3675                     DRM_MODE_CONNECTOR_DisplayPort)
3676                         continue;
3677
3678                 encoder = to_intel_encoder(connector->encoder);
3679                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3680                         continue;
3681
3682                 if (encoder && connector->status == connector_status_connected) {
3683                         intel_dp = enc_to_intel_dp(&encoder->base);
3684                         if (intel_dp->compliance.test_active)
3685                                 seq_puts(m, "1");
3686                         else
3687                                 seq_puts(m, "0");
3688                 } else
3689                         seq_puts(m, "0");
3690         }
3691         drm_connector_list_iter_end(&conn_iter);
3692
3693         return 0;
3694 }
3695
3696 static int i915_displayport_test_active_open(struct inode *inode,
3697                                              struct file *file)
3698 {
3699         return single_open(file, i915_displayport_test_active_show,
3700                            inode->i_private);
3701 }
3702
/* debugfs file ops: read shows test_active, write arms/disarms it. */
static const struct file_operations i915_displayport_test_active_fops = {
	.owner = THIS_MODULE,
	.open = i915_displayport_test_active_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_displayport_test_active_write
};
3711
3712 static int i915_displayport_test_data_show(struct seq_file *m, void *data)
3713 {
3714         struct drm_i915_private *dev_priv = m->private;
3715         struct drm_device *dev = &dev_priv->drm;
3716         struct drm_connector *connector;
3717         struct drm_connector_list_iter conn_iter;
3718         struct intel_dp *intel_dp;
3719
3720         drm_connector_list_iter_begin(dev, &conn_iter);
3721         drm_for_each_connector_iter(connector, &conn_iter) {
3722                 struct intel_encoder *encoder;
3723
3724                 if (connector->connector_type !=
3725                     DRM_MODE_CONNECTOR_DisplayPort)
3726                         continue;
3727
3728                 encoder = to_intel_encoder(connector->encoder);
3729                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3730                         continue;
3731
3732                 if (encoder && connector->status == connector_status_connected) {
3733                         intel_dp = enc_to_intel_dp(&encoder->base);
3734                         if (intel_dp->compliance.test_type ==
3735                             DP_TEST_LINK_EDID_READ)
3736                                 seq_printf(m, "%lx",
3737                                            intel_dp->compliance.test_data.edid);
3738                         else if (intel_dp->compliance.test_type ==
3739                                  DP_TEST_LINK_VIDEO_PATTERN) {
3740                                 seq_printf(m, "hdisplay: %d\n",
3741                                            intel_dp->compliance.test_data.hdisplay);
3742                                 seq_printf(m, "vdisplay: %d\n",
3743                                            intel_dp->compliance.test_data.vdisplay);
3744                                 seq_printf(m, "bpc: %u\n",
3745                                            intel_dp->compliance.test_data.bpc);
3746                         }
3747                 } else
3748                         seq_puts(m, "0");
3749         }
3750         drm_connector_list_iter_end(&conn_iter);
3751
3752         return 0;
3753 }
3754 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
3755
3756 static int i915_displayport_test_type_show(struct seq_file *m, void *data)
3757 {
3758         struct drm_i915_private *dev_priv = m->private;
3759         struct drm_device *dev = &dev_priv->drm;
3760         struct drm_connector *connector;
3761         struct drm_connector_list_iter conn_iter;
3762         struct intel_dp *intel_dp;
3763
3764         drm_connector_list_iter_begin(dev, &conn_iter);
3765         drm_for_each_connector_iter(connector, &conn_iter) {
3766                 struct intel_encoder *encoder;
3767
3768                 if (connector->connector_type !=
3769                     DRM_MODE_CONNECTOR_DisplayPort)
3770                         continue;
3771
3772                 encoder = to_intel_encoder(connector->encoder);
3773                 if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
3774                         continue;
3775
3776                 if (encoder && connector->status == connector_status_connected) {
3777                         intel_dp = enc_to_intel_dp(&encoder->base);
3778                         seq_printf(m, "%02lx", intel_dp->compliance.test_type);
3779                 } else
3780                         seq_puts(m, "0");
3781         }
3782         drm_connector_list_iter_end(&conn_iter);
3783
3784         return 0;
3785 }
3786 DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
3787
3788 static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3789 {
3790         struct drm_i915_private *dev_priv = m->private;
3791         struct drm_device *dev = &dev_priv->drm;
3792         int level;
3793         int num_levels;
3794
3795         if (IS_CHERRYVIEW(dev_priv))
3796                 num_levels = 3;
3797         else if (IS_VALLEYVIEW(dev_priv))
3798                 num_levels = 1;
3799         else if (IS_G4X(dev_priv))
3800                 num_levels = 3;
3801         else
3802                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3803
3804         drm_modeset_lock_all(dev);
3805
3806         for (level = 0; level < num_levels; level++) {
3807                 unsigned int latency = wm[level];
3808
3809                 /*
3810                  * - WM1+ latency values in 0.5us units
3811                  * - latencies are in us on gen9/vlv/chv
3812                  */
3813                 if (INTEL_GEN(dev_priv) >= 9 ||
3814                     IS_VALLEYVIEW(dev_priv) ||
3815                     IS_CHERRYVIEW(dev_priv) ||
3816                     IS_G4X(dev_priv))
3817                         latency *= 10;
3818                 else if (level > 0)
3819                         latency *= 5;
3820
3821                 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3822                            level, wm[level], latency / 10, latency % 10);
3823         }
3824
3825         drm_modeset_unlock_all(dev);
3826 }
3827
3828 static int pri_wm_latency_show(struct seq_file *m, void *data)
3829 {
3830         struct drm_i915_private *dev_priv = m->private;
3831         const uint16_t *latencies;
3832
3833         if (INTEL_GEN(dev_priv) >= 9)
3834                 latencies = dev_priv->wm.skl_latency;
3835         else
3836                 latencies = dev_priv->wm.pri_latency;
3837
3838         wm_latency_show(m, latencies);
3839
3840         return 0;
3841 }
3842
3843 static int spr_wm_latency_show(struct seq_file *m, void *data)
3844 {
3845         struct drm_i915_private *dev_priv = m->private;
3846         const uint16_t *latencies;
3847
3848         if (INTEL_GEN(dev_priv) >= 9)
3849                 latencies = dev_priv->wm.skl_latency;
3850         else
3851                 latencies = dev_priv->wm.spr_latency;
3852
3853         wm_latency_show(m, latencies);
3854
3855         return 0;
3856 }
3857
3858 static int cur_wm_latency_show(struct seq_file *m, void *data)
3859 {
3860         struct drm_i915_private *dev_priv = m->private;
3861         const uint16_t *latencies;
3862
3863         if (INTEL_GEN(dev_priv) >= 9)
3864                 latencies = dev_priv->wm.skl_latency;
3865         else
3866                 latencies = dev_priv->wm.cur_latency;
3867
3868         wm_latency_show(m, latencies);
3869
3870         return 0;
3871 }
3872
3873 static int pri_wm_latency_open(struct inode *inode, struct file *file)
3874 {
3875         struct drm_i915_private *dev_priv = inode->i_private;
3876
3877         if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
3878                 return -ENODEV;
3879
3880         return single_open(file, pri_wm_latency_show, dev_priv);
3881 }
3882
3883 static int spr_wm_latency_open(struct inode *inode, struct file *file)
3884 {
3885         struct drm_i915_private *dev_priv = inode->i_private;
3886
3887         if (HAS_GMCH_DISPLAY(dev_priv))
3888                 return -ENODEV;
3889
3890         return single_open(file, spr_wm_latency_show, dev_priv);
3891 }
3892
3893 static int cur_wm_latency_open(struct inode *inode, struct file *file)
3894 {
3895         struct drm_i915_private *dev_priv = inode->i_private;
3896
3897         if (HAS_GMCH_DISPLAY(dev_priv))
3898                 return -ENODEV;
3899
3900         return single_open(file, cur_wm_latency_show, dev_priv);
3901 }
3902
3903 static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3904                                 size_t len, loff_t *offp, uint16_t wm[8])
3905 {
3906         struct seq_file *m = file->private_data;
3907         struct drm_i915_private *dev_priv = m->private;
3908         struct drm_device *dev = &dev_priv->drm;
3909         uint16_t new[8] = { 0 };
3910         int num_levels;
3911         int level;
3912         int ret;
3913         char tmp[32];
3914
3915         if (IS_CHERRYVIEW(dev_priv))
3916                 num_levels = 3;
3917         else if (IS_VALLEYVIEW(dev_priv))
3918                 num_levels = 1;
3919         else if (IS_G4X(dev_priv))
3920                 num_levels = 3;
3921         else
3922                 num_levels = ilk_wm_max_level(dev_priv) + 1;
3923
3924         if (len >= sizeof(tmp))
3925                 return -EINVAL;
3926
3927         if (copy_from_user(tmp, ubuf, len))
3928                 return -EFAULT;
3929
3930         tmp[len] = '\0';
3931
3932         ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3933                      &new[0], &new[1], &new[2], &new[3],
3934                      &new[4], &new[5], &new[6], &new[7]);
3935         if (ret != num_levels)
3936                 return -EINVAL;
3937
3938         drm_modeset_lock_all(dev);
3939
3940         for (level = 0; level < num_levels; level++)
3941                 wm[level] = new[level];
3942
3943         drm_modeset_unlock_all(dev);
3944
3945         return len;
3946 }
3947
3948
3949 static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3950                                     size_t len, loff_t *offp)
3951 {
3952         struct seq_file *m = file->private_data;
3953         struct drm_i915_private *dev_priv = m->private;
3954         uint16_t *latencies;
3955
3956         if (INTEL_GEN(dev_priv) >= 9)
3957                 latencies = dev_priv->wm.skl_latency;
3958         else
3959                 latencies = dev_priv->wm.pri_latency;
3960
3961         return wm_latency_write(file, ubuf, len, offp, latencies);
3962 }
3963
3964 static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3965                                     size_t len, loff_t *offp)
3966 {
3967         struct seq_file *m = file->private_data;
3968         struct drm_i915_private *dev_priv = m->private;
3969         uint16_t *latencies;
3970
3971         if (INTEL_GEN(dev_priv) >= 9)
3972                 latencies = dev_priv->wm.skl_latency;
3973         else
3974                 latencies = dev_priv->wm.spr_latency;
3975
3976         return wm_latency_write(file, ubuf, len, offp, latencies);
3977 }
3978
3979 static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3980                                     size_t len, loff_t *offp)
3981 {
3982         struct seq_file *m = file->private_data;
3983         struct drm_i915_private *dev_priv = m->private;
3984         uint16_t *latencies;
3985
3986         if (INTEL_GEN(dev_priv) >= 9)
3987                 latencies = dev_priv->wm.skl_latency;
3988         else
3989                 latencies = dev_priv->wm.cur_latency;
3990
3991         return wm_latency_write(file, ubuf, len, offp, latencies);
3992 }
3993
/* debugfs fops for i915_pri_wm_latency: seq-file read, custom write */
static const struct file_operations i915_pri_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = pri_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = pri_wm_latency_write
};
4002
/* debugfs fops for i915_spr_wm_latency: seq-file read, custom write */
static const struct file_operations i915_spr_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = spr_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = spr_wm_latency_write
};
4011
/* debugfs fops for i915_cur_wm_latency: seq-file read, custom write */
static const struct file_operations i915_cur_wm_latency_fops = {
	.owner = THIS_MODULE,
	.open = cur_wm_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cur_wm_latency_write
};
4020
4021 static int
4022 i915_wedged_get(void *data, u64 *val)
4023 {
4024         struct drm_i915_private *dev_priv = data;
4025
4026         *val = i915_terminally_wedged(&dev_priv->gpu_error);
4027
4028         return 0;
4029 }
4030
/*
 * Manually declare engines hung: for each engine in the @val bitmask,
 * fake a hangcheck stall, trigger error handling, and wait for the
 * reset handoff to complete before returning.
 */
static int
i915_wedged_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	unsigned int tmp;

	/*
	 * There is no safeguard against this debugfs entry colliding
	 * with the hangcheck calling same i915_handle_error() in
	 * parallel, causing an explosion. For now we assume that the
	 * test harness is responsible enough not to inject gpu hangs
	 * while it is writing to 'i915_wedged'
	 */

	if (i915_reset_backoff(&i915->gpu_error))
		return -EAGAIN;

	/* Pretend hangcheck saw each selected engine stalled at its seqno */
	for_each_engine_masked(engine, i915, val, tmp) {
		engine->hangcheck.seqno = intel_engine_get_seqno(engine);
		engine->hangcheck.stalled = true;
	}

	i915_handle_error(i915, val, I915_ERROR_CAPTURE,
			  "Manually set wedged engine mask = %llx", val);

	/* Block until the reset has been handed off to the reset worker */
	wait_on_bit(&i915->gpu_error.flags,
		    I915_RESET_HANDOFF,
		    TASK_UNINTERRUPTIBLE);

	return 0;
}
4063
4064 DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4065                         i915_wedged_get, i915_wedged_set,
4066                         "%llu\n");
4067
/*
 * Common helper for the missed-irq/test-irq debugfs controls: wait for
 * the GPU to idle under struct_mutex, store @val into the mask word
 * @irq, then flush the idle worker so the irq state is disarmed.
 */
static int
fault_irq_set(struct drm_i915_private *i915,
	      unsigned long *irq,
	      unsigned long val)
{
	int err;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	/* The mask must only change while no requests are in flight */
	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_LOCKED |
				     I915_WAIT_INTERRUPTIBLE,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		goto err_unlock;

	*irq = val;
	mutex_unlock(&i915->drm.struct_mutex);

	/* Flush idle worker to disarm irq */
	drain_delayed_work(&i915->gt.idle_work);

	return 0;

err_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
4098
4099 static int
4100 i915_ring_missed_irq_get(void *data, u64 *val)
4101 {
4102         struct drm_i915_private *dev_priv = data;
4103
4104         *val = dev_priv->gpu_error.missed_irq_rings;
4105         return 0;
4106 }
4107
/* Update the missed-irq ring mask via the common fault_irq helper. */
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}
4115
4116 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
4117                         i915_ring_missed_irq_get, i915_ring_missed_irq_set,
4118                         "0x%08llx\n");
4119
4120 static int
4121 i915_ring_test_irq_get(void *data, u64 *val)
4122 {
4123         struct drm_i915_private *dev_priv = data;
4124
4125         *val = dev_priv->gpu_error.test_irq_rings;
4126
4127         return 0;
4128 }
4129
/*
 * Select engines whose user interrupt is masked for missed-irq testing.
 * Rejected when GuC submission is active or on gen11+, where per-engine
 * interrupt masking is not possible.
 */
static int
i915_ring_test_irq_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;

	/* GuC keeps the user interrupt permanently enabled for submission */
	if (USES_GUC_SUBMISSION(i915))
		return -ENODEV;

	/*
	 * From icl, we can no longer individually mask interrupt generation
	 * from each engine.
	 */
	if (INTEL_GEN(i915) >= 11)
		return -ENODEV;

	/* Restrict the mask to engines that actually exist on this device */
	val &= INTEL_INFO(i915)->ring_mask;
	DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);

	return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}
4151
4152 DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
4153                         i915_ring_test_irq_get, i915_ring_test_irq_set,
4154                         "0x%08llx\n");
4155
/* Flag bits accepted by the i915_drop_caches debugfs control */
#define DROP_UNBOUND	BIT(0)
#define DROP_BOUND	BIT(1)
#define DROP_RETIRE	BIT(2)
#define DROP_ACTIVE	BIT(3)
#define DROP_FREED	BIT(4)
#define DROP_SHRINK_ALL	BIT(5)
#define DROP_IDLE	BIT(6)
#define DROP_RESET_ACTIVE	BIT(7)
#define DROP_RESET_SEQNO	BIT(8)
#define DROP_ALL (DROP_UNBOUND	| \
		  DROP_BOUND	| \
		  DROP_RETIRE	| \
		  DROP_ACTIVE	| \
		  DROP_FREED	| \
		  DROP_SHRINK_ALL |\
		  DROP_IDLE	| \
		  DROP_RESET_ACTIVE | \
		  DROP_RESET_SEQNO)
/* Reading the control reports the full set of supported flags */
static int
i915_drop_caches_get(void *data, u64 *val)
{
	*val = DROP_ALL;

	return 0;
}
4181
/*
 * Write handler for i915_drop_caches: drop the GEM caches/state selected
 * by the DROP_* bits in @val. Used by tests to force the driver into a
 * known state.
 */
static int
i915_drop_caches_set(void *data, u64 val)
{
	struct drm_i915_private *i915 = data;
	int ret = 0;

	DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
		  val, val & DROP_ALL);
	intel_runtime_pm_get(i915);

	/* Declare the device wedged if work is still outstanding */
	if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
		i915_gem_set_wedged(i915);

	/* No need to check and wait for gpu resets, only libdrm auto-restarts
	 * on ioctls on -EAGAIN. */
	if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
		ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
		if (ret)
			goto out;

		if (val & DROP_ACTIVE)
			ret = i915_gem_wait_for_idle(i915,
						     I915_WAIT_INTERRUPTIBLE |
						     I915_WAIT_LOCKED,
						     MAX_SCHEDULE_TIMEOUT);

		/* Restart the global seqno only once the GPU is idle */
		if (ret == 0 && val & DROP_RESET_SEQNO)
			ret = i915_gem_set_global_seqno(&i915->drm, 1);

		if (val & DROP_RETIRE)
			i915_retire_requests(i915);

		mutex_unlock(&i915->drm.struct_mutex);
	}

	/* If wedged (above or previously), perform a full device reset */
	if (val & DROP_RESET_ACTIVE &&
	    i915_terminally_wedged(&i915->gpu_error)) {
		i915_handle_error(i915, ALL_ENGINES, 0, NULL);
		wait_on_bit(&i915->gpu_error.flags,
			    I915_RESET_HANDOFF,
			    TASK_UNINTERRUPTIBLE);
	}

	/* Run the shrinkers as if under memory pressure */
	fs_reclaim_acquire(GFP_KERNEL);
	if (val & DROP_BOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);

	if (val & DROP_UNBOUND)
		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_UNBOUND);

	if (val & DROP_SHRINK_ALL)
		i915_gem_shrink_all(i915);
	fs_reclaim_release(GFP_KERNEL);

	if (val & DROP_IDLE) {
		/* Flush the retire/idle workers until the GT goes to sleep */
		do {
			if (READ_ONCE(i915->gt.active_requests))
				flush_delayed_work(&i915->gt.retire_work);
			drain_delayed_work(&i915->gt.idle_work);
		} while (READ_ONCE(i915->gt.awake));
	}

	if (val & DROP_FREED)
		i915_gem_drain_freed_objects(i915);

out:
	intel_runtime_pm_put(i915);

	return ret;
}
4252
4253 DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
4254                         i915_drop_caches_get, i915_drop_caches_set,
4255                         "0x%08llx\n");
4256
4257 static int
4258 i915_cache_sharing_get(void *data, u64 *val)
4259 {
4260         struct drm_i915_private *dev_priv = data;
4261         u32 snpcr;
4262
4263         if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
4264                 return -ENODEV;
4265
4266         intel_runtime_pm_get(dev_priv);
4267
4268         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
4269
4270         intel_runtime_pm_put(dev_priv);
4271
4272         *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
4273
4274         return 0;
4275 }
4276
/*
 * Write handler for i915_cache_sharing: program the uncore snoop
 * control policy (0-3) into MBCUNIT_SNPCR on gen6/gen7.
 */
static int
i915_cache_sharing_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	u32 snpcr;

	if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
		return -ENODEV;

	/* Only policy values 0-3 fit the SNPCR field */
	if (val > 3)
		return -EINVAL;

	intel_runtime_pm_get(dev_priv);
	DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);

	/* Update the cache sharing policy here as well */
	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
4301
4302 DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops,
4303                         i915_cache_sharing_get, i915_cache_sharing_set,
4304                         "%llu\n");
4305
/*
 * Read the CHV power-gating signal registers to determine which
 * subslices/EUs are currently powered up, accumulating the result
 * into @sseu.
 */
static void cherryview_sseu_device_status(struct drm_i915_private *dev_priv,
					  struct sseu_dev_info *sseu)
{
#define SS_MAX 2
	const int ss_max = SS_MAX;
	u32 sig1[SS_MAX], sig2[SS_MAX];
	int ss;

	sig1[0] = I915_READ(CHV_POWER_SS0_SIG1);
	sig1[1] = I915_READ(CHV_POWER_SS1_SIG1);
	sig2[0] = I915_READ(CHV_POWER_SS0_SIG2);
	sig2[1] = I915_READ(CHV_POWER_SS1_SIG2);

	for (ss = 0; ss < ss_max; ss++) {
		unsigned int eu_cnt;

		if (sig1[ss] & CHV_SS_PG_ENABLE)
			/* skip disabled subslice */
			continue;

		sseu->slice_mask = BIT(0); /* only slice 0 is reported here */
		sseu->subslice_mask[0] |= BIT(ss);
		/* each PG_ENABLE bit accounts for a pair of EUs */
		eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) +
			 ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) +
			 ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 0 : 2);
		sseu->eu_total += eu_cnt;
		sseu->eu_per_subslice = max_t(unsigned int,
					      sseu->eu_per_subslice, eu_cnt);
	}
#undef SS_MAX
}
4338
/*
 * Read the gen10 slice/subslice power-gate ACK registers and accumulate
 * the currently powered-up slice/subslice/EU topology into @sseu.
 */
static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
				     struct sseu_dev_info *sseu)
{
#define SS_MAX 6
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		/*
		 * FIXME: Valid SS Mask respects the spec and read
		 * only valid bits for those registers, excluding reserverd
		 * although this seems wrong because it would leave many
		 * subslices without ACK.
		 */
		s_reg[s] = I915_READ(GEN10_SLICE_PGCTL_ACK(s)) &
			GEN10_PGCTL_VALID_SS_MASK(s);
		eu_reg[2 * s] = I915_READ(GEN10_SS01_EU_PGCTL_ACK(s));
		eu_reg[2 * s + 1] = I915_READ(GEN10_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) / odd (SSB) subslice of each pair */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);
		sseu->subslice_mask[s] = info->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
				/* skip disabled subslice */
				continue;

			/* each ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2 * s + ss / 2] &
					       eu_mask[ss % 2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4394
/*
 * Read the gen9 slice/subslice power-gate ACK registers and accumulate
 * the currently powered-up slice/subslice/EU topology into @sseu.
 */
static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
				    struct sseu_dev_info *sseu)
{
#define SS_MAX 3
	const struct intel_device_info *info = INTEL_INFO(dev_priv);
	u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
	int s, ss;

	for (s = 0; s < info->sseu.max_slices; s++) {
		s_reg[s] = I915_READ(GEN9_SLICE_PGCTL_ACK(s));
		eu_reg[2*s] = I915_READ(GEN9_SS01_EU_PGCTL_ACK(s));
		eu_reg[2*s + 1] = I915_READ(GEN9_SS23_EU_PGCTL_ACK(s));
	}

	/* ACK bits for the even (SSA) / odd (SSB) subslice of each pair */
	eu_mask[0] = GEN9_PGCTL_SSA_EU08_ACK |
		     GEN9_PGCTL_SSA_EU19_ACK |
		     GEN9_PGCTL_SSA_EU210_ACK |
		     GEN9_PGCTL_SSA_EU311_ACK;
	eu_mask[1] = GEN9_PGCTL_SSB_EU08_ACK |
		     GEN9_PGCTL_SSB_EU19_ACK |
		     GEN9_PGCTL_SSB_EU210_ACK |
		     GEN9_PGCTL_SSB_EU311_ACK;

	for (s = 0; s < info->sseu.max_slices; s++) {
		if ((s_reg[s] & GEN9_PGCTL_SLICE_ACK) == 0)
			/* skip disabled slice */
			continue;

		sseu->slice_mask |= BIT(s);

		/* gen9 big-core uses the static subslice topology; only
		 * gen9-lp derives it from the per-subslice ACKs below */
		if (IS_GEN9_BC(dev_priv))
			sseu->subslice_mask[s] =
				INTEL_INFO(dev_priv)->sseu.subslice_mask[s];

		for (ss = 0; ss < info->sseu.max_subslices; ss++) {
			unsigned int eu_cnt;

			if (IS_GEN9_LP(dev_priv)) {
				if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss))))
					/* skip disabled subslice */
					continue;

				sseu->subslice_mask[s] |= BIT(ss);
			}

			/* each ACK bit accounts for a pair of EUs */
			eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] &
					       eu_mask[ss%2]);
			sseu->eu_total += eu_cnt;
			sseu->eu_per_subslice = max_t(unsigned int,
						      sseu->eu_per_subslice,
						      eu_cnt);
		}
	}
#undef SS_MAX
}
4450
4451 static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
4452                                          struct sseu_dev_info *sseu)
4453 {
4454         u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
4455         int s;
4456
4457         sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK;
4458
4459         if (sseu->slice_mask) {
4460                 sseu->eu_per_subslice =
4461                                 INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
4462                 for (s = 0; s < fls(sseu->slice_mask); s++) {
4463                         sseu->subslice_mask[s] =
4464                                 INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
4465                 }
4466                 sseu->eu_total = sseu->eu_per_subslice *
4467                                  sseu_subslice_total(sseu);
4468
4469                 /* subtract fused off EU(s) from enabled slice(s) */
4470                 for (s = 0; s < fls(sseu->slice_mask); s++) {
4471                         u8 subslice_7eu =
4472                                 INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
4473
4474                         sseu->eu_total -= hweight8(subslice_7eu);
4475                 }
4476         }
4477 }
4478
4479 static void i915_print_sseu_info(struct seq_file *m, bool is_available_info,
4480                                  const struct sseu_dev_info *sseu)
4481 {
4482         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4483         const char *type = is_available_info ? "Available" : "Enabled";
4484         int s;
4485
4486         seq_printf(m, "  %s Slice Mask: %04x\n", type,
4487                    sseu->slice_mask);
4488         seq_printf(m, "  %s Slice Total: %u\n", type,
4489                    hweight8(sseu->slice_mask));
4490         seq_printf(m, "  %s Subslice Total: %u\n", type,
4491                    sseu_subslice_total(sseu));
4492         for (s = 0; s < fls(sseu->slice_mask); s++) {
4493                 seq_printf(m, "  %s Slice%i subslices: %u\n", type,
4494                            s, hweight8(sseu->subslice_mask[s]));
4495         }
4496         seq_printf(m, "  %s EU Total: %u\n", type,
4497                    sseu->eu_total);
4498         seq_printf(m, "  %s EU Per Subslice: %u\n", type,
4499                    sseu->eu_per_subslice);
4500
4501         if (!is_available_info)
4502                 return;
4503
4504         seq_printf(m, "  Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev_priv)));
4505         if (HAS_POOLED_EU(dev_priv))
4506                 seq_printf(m, "  Min EU in pool: %u\n", sseu->min_eu_in_pool);
4507
4508         seq_printf(m, "  Has Slice Power Gating: %s\n",
4509                    yesno(sseu->has_slice_pg));
4510         seq_printf(m, "  Has Subslice Power Gating: %s\n",
4511                    yesno(sseu->has_subslice_pg));
4512         seq_printf(m, "  Has EU Power Gating: %s\n",
4513                    yesno(sseu->has_eu_pg));
4514 }
4515
4516 static int i915_sseu_status(struct seq_file *m, void *unused)
4517 {
4518         struct drm_i915_private *dev_priv = node_to_i915(m->private);
4519         struct sseu_dev_info sseu;
4520
4521         if (INTEL_GEN(dev_priv) < 8)
4522                 return -ENODEV;
4523
4524         seq_puts(m, "SSEU Device Info\n");
4525         i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
4526
4527         seq_puts(m, "SSEU Device Status\n");
4528         memset(&sseu, 0, sizeof(sseu));
4529         sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
4530         sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
4531         sseu.max_eus_per_subslice =
4532                 INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
4533
4534         intel_runtime_pm_get(dev_priv);
4535
4536         if (IS_CHERRYVIEW(dev_priv)) {
4537                 cherryview_sseu_device_status(dev_priv, &sseu);
4538         } else if (IS_BROADWELL(dev_priv)) {
4539                 broadwell_sseu_device_status(dev_priv, &sseu);
4540         } else if (IS_GEN9(dev_priv)) {
4541                 gen9_sseu_device_status(dev_priv, &sseu);
4542         } else if (INTEL_GEN(dev_priv) >= 10) {
4543                 gen10_sseu_device_status(dev_priv, &sseu);
4544         }
4545
4546         intel_runtime_pm_put(dev_priv);
4547
4548         i915_print_sseu_info(m, false, &sseu);
4549
4550         return 0;
4551 }
4552
4553 static int i915_forcewake_open(struct inode *inode, struct file *file)
4554 {
4555         struct drm_i915_private *i915 = inode->i_private;
4556
4557         if (INTEL_GEN(i915) < 6)
4558                 return 0;
4559
4560         intel_runtime_pm_get(i915);
4561         intel_uncore_forcewake_user_get(i915);
4562
4563         return 0;
4564 }
4565
4566 static int i915_forcewake_release(struct inode *inode, struct file *file)
4567 {
4568         struct drm_i915_private *i915 = inode->i_private;
4569
4570         if (INTEL_GEN(i915) < 6)
4571                 return 0;
4572
4573         intel_uncore_forcewake_user_put(i915);
4574         intel_runtime_pm_put(i915);
4575
4576         return 0;
4577 }
4578
/* debugfs fops for i915_forcewake_user: forcewake held while file is open */
static const struct file_operations i915_forcewake_fops = {
	.owner = THIS_MODULE,
	.open = i915_forcewake_open,
	.release = i915_forcewake_release,
};
4584
4585 static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
4586 {
4587         struct drm_i915_private *dev_priv = m->private;
4588         struct i915_hotplug *hotplug = &dev_priv->hotplug;
4589
4590         seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
4591         seq_printf(m, "Detected: %s\n",
4592                    yesno(delayed_work_pending(&hotplug->reenable_work)));
4593
4594         return 0;
4595 }
4596
/*
 * Write handler for i915_hpd_storm_ctl: accepts a decimal threshold
 * (0 disables storm detection) or the string "reset" to restore the
 * default, then clears the per-pin storm statistics.
 */
static ssize_t i915_hpd_storm_ctl_write(struct file *file,
					const char __user *ubuf, size_t len,
					loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_i915_private *dev_priv = m->private;
	struct i915_hotplug *hotplug = &dev_priv->hotplug;
	unsigned int new_threshold;
	int i;
	char *newline;
	char tmp[16];

	if (len >= sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(tmp, ubuf, len))
		return -EFAULT;

	tmp[len] = '\0';

	/* Strip newline, if any */
	newline = strchr(tmp, '\n');
	if (newline)
		*newline = '\0';

	if (strcmp(tmp, "reset") == 0)
		new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	else if (kstrtouint(tmp, 10, &new_threshold) != 0)
		return -EINVAL;

	if (new_threshold > 0)
		DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
			      new_threshold);
	else
		DRM_DEBUG_KMS("Disabling HPD storm detection\n");

	/* threshold and stats are protected by the irq lock */
	spin_lock_irq(&dev_priv->irq_lock);
	hotplug->hpd_storm_threshold = new_threshold;
	/* Reset the HPD storm stats so we don't accidentally trigger a storm */
	for_each_hpd_pin(i)
		hotplug->stats[i].count = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Re-enable hpd immediately if we were in an irq storm */
	flush_delayed_work(&dev_priv->hotplug.reenable_work);

	return len;
}
4645
/* Open hook: route reads through the single_open()/seq_file machinery. */
static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
{
	return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
}
4650
/*
 * Readable (dumps current threshold/detection state) and writable
 * (accepts a new threshold or "reset").
 */
static const struct file_operations i915_hpd_storm_ctl_fops = {
	.owner = THIS_MODULE,
	.open = i915_hpd_storm_ctl_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = i915_hpd_storm_ctl_write
};
4659
/*
 * Manually enable (val != 0) or disable (val == 0) DRRS on every active
 * CRTC that supports it.  For each such CRTC we take its modeset lock,
 * wait for any pending commit's hardware programming to finish, then walk
 * the connectors bound to that CRTC and toggle DRRS on each eDP output.
 * Rejected with -ENODEV on hardware before gen7.
 */
static int i915_drrs_ctl_set(void *data, u64 val)
{
	struct drm_i915_private *dev_priv = data;
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	if (INTEL_GEN(dev_priv) < 7)
		return -ENODEV;

	for_each_intel_crtc(dev, crtc) {
		struct drm_connector_list_iter conn_iter;
		struct intel_crtc_state *crtc_state;
		struct drm_connector *connector;
		struct drm_crtc_commit *commit;
		int ret;

		ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		/* Nothing to do on inactive or non-DRRS CRTCs */
		if (!crtc_state->base.active ||
		    !crtc_state->has_drrs)
			goto out;

		/* Wait for an in-flight commit to finish programming the hw */
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (ret)
				goto out;
		}

		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct intel_encoder *encoder;
			struct intel_dp *intel_dp;

			/* Skip connectors not driven by this CRTC */
			if (!(crtc_state->base.connector_mask &
			      drm_connector_mask(connector)))
				continue;

			encoder = intel_attached_encoder(connector);
			if (encoder->type != INTEL_OUTPUT_EDP)
				continue;

			DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
						val ? "en" : "dis", val);

			intel_dp = enc_to_intel_dp(&encoder->base);
			if (val)
				intel_edp_drrs_enable(intel_dp,
						      crtc_state);
			else
				intel_edp_drrs_disable(intel_dp,
						       crtc_state);
		}
		drm_connector_list_iter_end(&conn_iter);

out:
		drm_modeset_unlock(&crtc->base.mutex);
		if (ret)
			return ret;
	}

	return 0;
}
4727
/* No get callback: the attribute is effectively write-only (u64 value). */
DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
4729
/*
 * Writing a "true" value re-arms FIFO underrun reporting on every active
 * pipe and resets the FBC underrun state; any other value is a no-op.
 * For each CRTC, the modeset lock is taken and a pending commit's hw_done
 * and flip_done completions are waited on before re-arming, so we don't
 * race with an in-flight modeset.
 */
static ssize_t
i915_fifo_underrun_reset_write(struct file *filp,
			       const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct drm_i915_private *dev_priv = filp->private_data;
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	bool reset;

	ret = kstrtobool_from_user(ubuf, cnt, &reset);
	if (ret)
		return ret;

	if (!reset)
		return cnt;

	for_each_intel_crtc(dev, intel_crtc) {
		struct drm_crtc_commit *commit;
		struct intel_crtc_state *crtc_state;

		ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
		if (ret)
			return ret;

		crtc_state = to_intel_crtc_state(intel_crtc->base.state);
		commit = crtc_state->base.commit;
		if (commit) {
			ret = wait_for_completion_interruptible(&commit->hw_done);
			if (!ret)
				ret = wait_for_completion_interruptible(&commit->flip_done);
		}

		if (!ret && crtc_state->base.active) {
			DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
				      pipe_name(intel_crtc->pipe));

			intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
		}

		drm_modeset_unlock(&intel_crtc->base.mutex);

		if (ret)
			return ret;
	}

	ret = intel_fbc_reset_underrun(dev_priv);
	if (ret)
		return ret;

	return cnt;
}
4783
/* Write-only trigger file; reads are not implemented. */
static const struct file_operations i915_fifo_underrun_reset_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = i915_fifo_underrun_reset_write,
	.llseek = default_llseek,
};
4790
4791 static const struct drm_info_list i915_debugfs_list[] = {
4792         {"i915_capabilities", i915_capabilities, 0},
4793         {"i915_gem_objects", i915_gem_object_info, 0},
4794         {"i915_gem_gtt", i915_gem_gtt_info, 0},
4795         {"i915_gem_stolen", i915_gem_stolen_list_info },
4796         {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
4797         {"i915_gem_interrupt", i915_interrupt_info, 0},
4798         {"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
4799         {"i915_guc_info", i915_guc_info, 0},
4800         {"i915_guc_load_status", i915_guc_load_status_info, 0},
4801         {"i915_guc_log_dump", i915_guc_log_dump, 0},
4802         {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
4803         {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
4804         {"i915_huc_load_status", i915_huc_load_status_info, 0},
4805         {"i915_frequency_info", i915_frequency_info, 0},
4806         {"i915_hangcheck_info", i915_hangcheck_info, 0},
4807         {"i915_reset_info", i915_reset_info, 0},
4808         {"i915_drpc_info", i915_drpc_info, 0},
4809         {"i915_emon_status", i915_emon_status, 0},
4810         {"i915_ring_freq_table", i915_ring_freq_table, 0},
4811         {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
4812         {"i915_fbc_status", i915_fbc_status, 0},
4813         {"i915_ips_status", i915_ips_status, 0},
4814         {"i915_sr_status", i915_sr_status, 0},
4815         {"i915_opregion", i915_opregion, 0},
4816         {"i915_vbt", i915_vbt, 0},
4817         {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
4818         {"i915_context_status", i915_context_status, 0},
4819         {"i915_forcewake_domains", i915_forcewake_domains, 0},
4820         {"i915_swizzle_info", i915_swizzle_info, 0},
4821         {"i915_ppgtt_info", i915_ppgtt_info, 0},
4822         {"i915_llc", i915_llc, 0},
4823         {"i915_edp_psr_status", i915_edp_psr_status, 0},
4824         {"i915_energy_uJ", i915_energy_uJ, 0},
4825         {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
4826         {"i915_power_domain_info", i915_power_domain_info, 0},
4827         {"i915_dmc_info", i915_dmc_info, 0},
4828         {"i915_display_info", i915_display_info, 0},
4829         {"i915_engine_info", i915_engine_info, 0},
4830         {"i915_rcs_topology", i915_rcs_topology, 0},
4831         {"i915_shrinker_info", i915_shrinker_info, 0},
4832         {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4833         {"i915_dp_mst_info", i915_dp_mst_info, 0},
4834         {"i915_wa_registers", i915_wa_registers, 0},
4835         {"i915_ddb_info", i915_ddb_info, 0},
4836         {"i915_sseu_status", i915_sseu_status, 0},
4837         {"i915_drrs_status", i915_drrs_status, 0},
4838         {"i915_rps_boost_info", i915_rps_boost_info, 0},
4839 };
4840 #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4841
/*
 * debugfs entries with dedicated file_operations (i.e. read/write
 * semantics beyond a plain seq_file dump).  All are created
 * world-readable, owner-writable by i915_debugfs_register().
 */
static const struct i915_debugfs_files {
	const char *name;
	const struct file_operations *fops;
} i915_debugfs_files[] = {
	{"i915_wedged", &i915_wedged_fops},
	{"i915_cache_sharing", &i915_cache_sharing_fops},
	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
	{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	{"i915_error_state", &i915_error_state_fops},
	{"i915_gpu_info", &i915_gpu_info_fops},
#endif
	{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
	{"i915_next_seqno", &i915_next_seqno_fops},
	{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
	{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
	{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
	{"i915_fbc_false_color", &i915_fbc_false_color_fops},
	{"i915_dp_test_data", &i915_displayport_test_data_fops},
	{"i915_dp_test_type", &i915_displayport_test_type_fops},
	{"i915_dp_test_active", &i915_displayport_test_active_fops},
	{"i915_guc_log_level", &i915_guc_log_level_fops},
	{"i915_guc_log_relay", &i915_guc_log_relay_fops},
	{"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
	{"i915_ipc_status", &i915_ipc_status_fops},
	{"i915_drrs_ctl", &i915_drrs_ctl_fops},
	{"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
4871
4872 int i915_debugfs_register(struct drm_i915_private *dev_priv)
4873 {
4874         struct drm_minor *minor = dev_priv->drm.primary;
4875         struct dentry *ent;
4876         int i;
4877
4878         ent = debugfs_create_file("i915_forcewake_user", S_IRUSR,
4879                                   minor->debugfs_root, to_i915(minor->dev),
4880                                   &i915_forcewake_fops);
4881         if (!ent)
4882                 return -ENOMEM;
4883
4884         for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
4885                 ent = debugfs_create_file(i915_debugfs_files[i].name,
4886                                           S_IRUGO | S_IWUSR,
4887                                           minor->debugfs_root,
4888                                           to_i915(minor->dev),
4889                                           i915_debugfs_files[i].fops);
4890                 if (!ent)
4891                         return -ENOMEM;
4892         }
4893
4894         return drm_debugfs_create_files(i915_debugfs_list,
4895                                         I915_DEBUGFS_ENTRIES,
4896                                         minor->debugfs_root, minor);
4897 }
4898
/* Describes one contiguous range of DPCD registers to dump. */
struct dpcd_block {
	/* DPCD dump start address. */
	unsigned int offset;
	/* DPCD dump end address, inclusive. If unset, .size will be used. */
	unsigned int end;
	/* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
	size_t size;
	/* Only valid for eDP. */
	bool edp;
};
4909
/* DPCD register ranges dumped by i915_dpcd_show() below. */
static const struct dpcd_block i915_dpcd_debug[] = {
	{ .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
	{ .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
	{ .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
	{ .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
	{ .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
	{ .offset = DP_SET_POWER },
	{ .offset = DP_EDP_DPCD_REV },
	{ .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
	{ .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
	{ .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
};
4922
4923 static int i915_dpcd_show(struct seq_file *m, void *data)
4924 {
4925         struct drm_connector *connector = m->private;
4926         struct intel_dp *intel_dp =
4927                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4928         uint8_t buf[16];
4929         ssize_t err;
4930         int i;
4931
4932         if (connector->status != connector_status_connected)
4933                 return -ENODEV;
4934
4935         for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
4936                 const struct dpcd_block *b = &i915_dpcd_debug[i];
4937                 size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
4938
4939                 if (b->edp &&
4940                     connector->connector_type != DRM_MODE_CONNECTOR_eDP)
4941                         continue;
4942
4943                 /* low tech for now */
4944                 if (WARN_ON(size > sizeof(buf)))
4945                         continue;
4946
4947                 err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
4948                 if (err < 0)
4949                         seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
4950                 else
4951                         seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
4952         }
4953
4954         return 0;
4955 }
4956 DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
4957
4958 static int i915_panel_show(struct seq_file *m, void *data)
4959 {
4960         struct drm_connector *connector = m->private;
4961         struct intel_dp *intel_dp =
4962                 enc_to_intel_dp(&intel_attached_encoder(connector)->base);
4963
4964         if (connector->status != connector_status_connected)
4965                 return -ENODEV;
4966
4967         seq_printf(m, "Panel power up delay: %d\n",
4968                    intel_dp->panel_power_up_delay);
4969         seq_printf(m, "Panel power down delay: %d\n",
4970                    intel_dp->panel_power_down_delay);
4971         seq_printf(m, "Backlight on delay: %d\n",
4972                    intel_dp->backlight_on_delay);
4973         seq_printf(m, "Backlight off delay: %d\n",
4974                    intel_dp->backlight_off_delay);
4975
4976         return 0;
4977 }
4978 DEFINE_SHOW_ATTRIBUTE(i915_panel);
4979
4980 static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
4981 {
4982         struct drm_connector *connector = m->private;
4983         struct intel_connector *intel_connector = to_intel_connector(connector);
4984
4985         if (connector->status != connector_status_connected)
4986                 return -ENODEV;
4987
4988         /* HDCP is supported by connector */
4989         if (!intel_connector->hdcp_shim)
4990                 return -EINVAL;
4991
4992         seq_printf(m, "%s:%d HDCP version: ", connector->name,
4993                    connector->base.id);
4994         seq_printf(m, "%s ", !intel_hdcp_capable(intel_connector) ?
4995                    "None" : "HDCP1.4");
4996         seq_puts(m, "\n");
4997
4998         return 0;
4999 }
5000 DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
5001
5002 /**
5003  * i915_debugfs_connector_add - add i915 specific connector debugfs files
5004  * @connector: pointer to a registered drm_connector
5005  *
5006  * Cleanup will be done by drm_connector_unregister() through a call to
5007  * drm_debugfs_connector_remove().
5008  *
5009  * Returns 0 on success, negative error codes on error.
5010  */
5011 int i915_debugfs_connector_add(struct drm_connector *connector)
5012 {
5013         struct dentry *root = connector->debugfs_entry;
5014
5015         /* The connector must have been registered beforehands. */
5016         if (!root)
5017                 return -ENODEV;
5018
5019         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5020             connector->connector_type == DRM_MODE_CONNECTOR_eDP)
5021                 debugfs_create_file("i915_dpcd", S_IRUGO, root,
5022                                     connector, &i915_dpcd_fops);
5023
5024         if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
5025                 debugfs_create_file("i915_panel_timings", S_IRUGO, root,
5026                                     connector, &i915_panel_fops);
5027                 debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
5028                                     connector, &i915_psr_sink_status_fops);
5029         }
5030
5031         if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
5032             connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
5033             connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
5034                 debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
5035                                     connector, &i915_hdcp_sink_capability_fops);
5036         }
5037
5038         return 0;
5039 }