1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include "gem/i915_gem_pm.h"
8 #include "i915_selftest.h"
9 #include "intel_reset.h"
10
11 #include "selftests/igt_flush_test.h"
12 #include "selftests/igt_reset.h"
13 #include "selftests/igt_spinner.h"
14 #include "selftests/igt_wedge_me.h"
15 #include "selftests/mock_drm.h"
16
17 #include "gem/selftests/igt_gem_utils.h"
18 #include "gem/selftests/mock_context.h"
19
20 static const struct wo_register {
21         enum intel_platform platform;
22         u32 reg;
23 } wo_registers[] = {
24         { INTEL_GEMINILAKE, 0x731c }
25 };
26
27 #define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 8)
28 struct wa_lists {
29         struct i915_wa_list gt_wa_list;
30         struct {
31                 char name[REF_NAME_MAX];
32                 struct i915_wa_list wa_list;
33                 struct i915_wa_list ctx_wa_list;
34         } engine[I915_NUM_ENGINES];
35 };
36
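/*
 * Capture reference copies of the GT, per-engine and per-context
 * workaround lists so they can be compared against the hardware state
 * after the various resets exercised below.
 */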
37 static void
38 reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
39 {
40         struct intel_engine_cs *engine;
41         enum intel_engine_id id;
42
43         memset(lists, 0, sizeof(*lists));
44
45         wa_init_start(&lists->gt_wa_list, "GT_REF");
46         gt_init_workarounds(i915, &lists->gt_wa_list);
47         wa_init_finish(&lists->gt_wa_list);
48
49         for_each_engine(engine, i915, id) {
50                 struct i915_wa_list *wal = &lists->engine[id].wa_list;
51                 char *name = lists->engine[id].name;
52
53                 snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
54
55                 wa_init_start(wal, name);
56                 engine_init_workarounds(engine, wal);
57                 wa_init_finish(wal);
58
59                 snprintf(name, REF_NAME_MAX, "%s_CTX_REF", engine->name);
60
61                 __intel_engine_init_ctx_wa(engine,
62                                            &lists->engine[id].ctx_wa_list,
63                                            name);
64         }
65 }
66
67 static void
68 reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
69 {
70         struct intel_engine_cs *engine;
71         enum intel_engine_id id;
72
73         for_each_engine(engine, i915, id) {
74                 intel_wa_list_free(&lists->engine[id].wa_list);
                intel_wa_list_free(&lists->engine[id].ctx_wa_list);
        }
75
76         intel_wa_list_free(&lists->gt_wa_list);
77 }
78
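/*
 * Build a request on @engine that uses MI_STORE_REGISTER_MEM to copy
 * every RING_FORCE_TO_NONPRIV slot into a freshly allocated internal
 * object, so the caller can inspect the whitelist the hardware is
 * actually using.
 */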
79 static struct drm_i915_gem_object *
80 read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
81 {
82         const u32 base = engine->mmio_base;
83         struct drm_i915_gem_object *result;
84         struct i915_request *rq;
85         struct i915_vma *vma;
86         u32 srm, *cs;
87         int err;
88         int i;
89
90         result = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
91         if (IS_ERR(result))
92                 return result;
93
94         i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
95
96         cs = i915_gem_object_pin_map(result, I915_MAP_WB);
97         if (IS_ERR(cs)) {
98                 err = PTR_ERR(cs);
99                 goto err_obj;
100         }
101         memset(cs, 0xc5, PAGE_SIZE);
102         i915_gem_object_flush_map(result);
103         i915_gem_object_unpin_map(result);
104
105         vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
106         if (IS_ERR(vma)) {
107                 err = PTR_ERR(vma);
108                 goto err_obj;
109         }
110
111         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
112         if (err)
113                 goto err_obj;
114
115         rq = igt_request_alloc(ctx, engine);
116         if (IS_ERR(rq)) {
117                 err = PTR_ERR(rq);
118                 goto err_pin;
119         }
120
121         i915_vma_lock(vma);
122         err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
123         i915_vma_unlock(vma);
124         if (err)
125                 goto err_req;
126
127         srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
128         if (INTEL_GEN(ctx->i915) >= 8)
129                 srm++;
130
131         cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS);
132         if (IS_ERR(cs)) {
133                 err = PTR_ERR(cs);
134                 goto err_req;
135         }
136
137         for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
138                 *cs++ = srm;
139                 *cs++ = i915_mmio_reg_offset(RING_FORCE_TO_NONPRIV(base, i));
140                 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
141                 *cs++ = 0;
142         }
143         intel_ring_advance(rq, cs);
144
145         i915_request_add(rq);
146         i915_vma_unpin(vma);
147
148         return result;
149
150 err_req:
151         i915_request_add(rq);
152 err_pin:
153         i915_vma_unpin(vma);
154 err_obj:
155         i915_gem_object_put(result);
156         return ERR_PTR(err);
157 }
158
159 static u32
160 get_whitelist_reg(const struct intel_engine_cs *engine, unsigned int i)
161 {
162         i915_reg_t reg = i < engine->whitelist.count ?
163                          engine->whitelist.list[i].reg :
164                          RING_NOPID(engine->mmio_base);
165
166         return i915_mmio_reg_offset(reg);
167 }
168
169 static void
170 print_results(const struct intel_engine_cs *engine, const u32 *results)
171 {
172         unsigned int i;
173
174         for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
175                 u32 expected = get_whitelist_reg(engine, i);
176                 u32 actual = results[i];
177
178                 pr_info("RING_NONPRIV[%d]: expected 0x%08x, found 0x%08x\n",
179                         i, expected, actual);
180         }
181 }
182
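/*
 * Compare each RING_FORCE_TO_NONPRIV slot read back from the hardware
 * against the register expected in that slot (RING_NOPID for unused
 * slots), dumping all slots on the first mismatch.
 */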
183 static int check_whitelist(struct i915_gem_context *ctx,
184                            struct intel_engine_cs *engine)
185 {
186         struct drm_i915_gem_object *results;
187         struct igt_wedge_me wedge;
188         u32 *vaddr;
189         int err;
190         int i;
191
192         results = read_nonprivs(ctx, engine);
193         if (IS_ERR(results))
194                 return PTR_ERR(results);
195
196         err = 0;
197         i915_gem_object_lock(results);
198         igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
199                 err = i915_gem_object_set_to_cpu_domain(results, false);
200         i915_gem_object_unlock(results);
201         if (i915_terminally_wedged(ctx->i915))
202                 err = -EIO;
203         if (err)
204                 goto out_put;
205
206         vaddr = i915_gem_object_pin_map(results, I915_MAP_WB);
207         if (IS_ERR(vaddr)) {
208                 err = PTR_ERR(vaddr);
209                 goto out_put;
210         }
211
212         for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++) {
213                 u32 expected = get_whitelist_reg(engine, i);
214                 u32 actual = vaddr[i];
215
216                 if (expected != actual) {
217                         print_results(engine, vaddr);
218                         pr_err("Invalid RING_NONPRIV[%d], expected 0x%08x, found 0x%08x\n",
219                                i, expected, actual);
220
221                         err = -EINVAL;
222                         break;
223                 }
224         }
225
226         i915_gem_object_unpin_map(results);
227 out_put:
228         i915_gem_object_put(results);
229         return err;
230 }
231
232 static int do_device_reset(struct intel_engine_cs *engine)
233 {
234         i915_reset(engine->i915, engine->mask, "live_workarounds");
235         return 0;
236 }
237
238 static int do_engine_reset(struct intel_engine_cs *engine)
239 {
240         return i915_reset_engine(engine, "live_workarounds");
241 }
242
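/*
 * Submit a spinning batch from a throwaway kernel context so that the
 * engine is busy in a different context when the reset is performed.
 */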
243 static int
244 switch_to_scratch_context(struct intel_engine_cs *engine,
245                           struct igt_spinner *spin)
246 {
247         struct i915_gem_context *ctx;
248         struct i915_request *rq;
249         intel_wakeref_t wakeref;
250         int err = 0;
251
252         ctx = kernel_context(engine->i915);
253         if (IS_ERR(ctx))
254                 return PTR_ERR(ctx);
255
256         GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
257
258         rq = ERR_PTR(-ENODEV);
259         with_intel_runtime_pm(engine->i915, wakeref)
260                 rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
261
262         kernel_context_close(ctx);
263
264         if (IS_ERR(rq)) {
265                 spin = NULL;
266                 err = PTR_ERR(rq);
267                 goto err;
268         }
269
270         i915_request_add(rq);
271
272         if (spin && !igt_wait_for_spinner(spin, rq)) {
273                 pr_err("Spinner failed to start\n");
274                 err = -ETIMEDOUT;
275         }
276
277 err:
278         if (err && spin)
279                 igt_spinner_end(spin);
280
281         return err;
282 }
283
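/*
 * Verify the whitelist before the reset, after the reset in the original
 * context (the registers must be restored), and again in a freshly
 * created context (the registers must be reprogrammed for new contexts
 * too).
 */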
284 static int check_whitelist_across_reset(struct intel_engine_cs *engine,
285                                         int (*reset)(struct intel_engine_cs *),
286                                         const char *name)
287 {
288         struct drm_i915_private *i915 = engine->i915;
289         struct i915_gem_context *ctx;
290         struct igt_spinner spin;
291         intel_wakeref_t wakeref;
292         int err;
293
294         pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
295                 engine->whitelist.count, name);
296
297         ctx = kernel_context(i915);
298         if (IS_ERR(ctx))
299                 return PTR_ERR(ctx);
300
301         err = igt_spinner_init(&spin, i915);
302         if (err)
303                 goto out;
304
305         err = check_whitelist(ctx, engine);
306         if (err) {
307                 pr_err("Invalid whitelist *before* %s reset!\n", name);
308                 goto out;
309         }
310
311         err = switch_to_scratch_context(engine, &spin);
312         if (err)
313                 goto out;
314
315         with_intel_runtime_pm(i915, wakeref)
316                 err = reset(engine);
317
318         igt_spinner_end(&spin);
319         igt_spinner_fini(&spin);
320
321         if (err) {
322                 pr_err("%s reset failed\n", name);
323                 goto out;
324         }
325
326         err = check_whitelist(ctx, engine);
327         if (err) {
328                 pr_err("Whitelist not preserved in context across %s reset!\n",
329                        name);
330                 goto out;
331         }
332
333         kernel_context_close(ctx);
334
335         ctx = kernel_context(i915);
336         if (IS_ERR(ctx))
337                 return PTR_ERR(ctx);
338
339         err = check_whitelist(ctx, engine);
340         if (err) {
341                 pr_err("Invalid whitelist *after* %s reset in fresh context!\n",
342                        name);
343                 goto out;
344         }
345
346 out:
347         kernel_context_close(ctx);
348         return err;
349 }
350
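/*
 * Allocate and pin a 16 page internal object in the context's VM for
 * use as an unprivileged batch buffer.
 */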
351 static struct i915_vma *create_batch(struct i915_gem_context *ctx)
352 {
353         struct drm_i915_gem_object *obj;
354         struct i915_vma *vma;
355         int err;
356
357         obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
358         if (IS_ERR(obj))
359                 return ERR_CAST(obj);
360
361         vma = i915_vma_instance(obj, ctx->vm, NULL);
362         if (IS_ERR(vma)) {
363                 err = PTR_ERR(vma);
364                 goto err_obj;
365         }
366
367         err = i915_vma_pin(vma, 0, 0, PIN_USER);
368         if (err)
369                 goto err_obj;
370
371         i915_gem_object_lock(obj);
372         err = i915_gem_object_set_to_wc_domain(obj, true);
373         i915_gem_object_unlock(obj);
374         if (err)
375                 goto err_obj;
376
377         return vma;
378
379 err_obj:
380         i915_gem_object_put(obj);
381         return ERR_PTR(err);
382 }
383
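/*
 * Model the effect of a register write given the writable bits detected
 * for that register: a read-back of 0x0000ffff after writing 0xffffffff
 * indicates a masked register (the upper 16 bits select which of the
 * lower 16 bits are updated); otherwise only the bits set in @rsvd are
 * writable.
 */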
384 static u32 reg_write(u32 old, u32 new, u32 rsvd)
385 {
386         if (rsvd == 0x0000ffff) {
387                 old &= ~(new >> 16);
388                 old |= new & (new >> 16);
389         } else {
390                 old &= ~rsvd;
391                 old |= new & rsvd;
392         }
393
394         return old;
395 }
396
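/* Is @reg a known write-only register on this platform? */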
397 static bool wo_register(struct intel_engine_cs *engine, u32 reg)
398 {
399         enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
400         int i;
401
402         for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
403                 if (wo_registers[i].platform == platform &&
404                     wo_registers[i].reg == reg)
405                         return true;
406         }
407
408         return false;
409 }
410
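/*
 * For each whitelisted register, write a series of garbage values from
 * an unprivileged batch (LRI), store each result to scratch (SRM) and
 * compare the read-back values against what reg_write() predicts from
 * the writable-bit mask detected for that register. The original value
 * is reloaded (LRM) afterwards so no garbage is left in the context.
 */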
411 static int check_dirty_whitelist(struct i915_gem_context *ctx,
412                                  struct intel_engine_cs *engine)
413 {
414         const u32 values[] = {
415                 0x00000000,
416                 0x01010101,
417                 0x10100101,
418                 0x03030303,
419                 0x30300303,
420                 0x05050505,
421                 0x50500505,
422                 0x0f0f0f0f,
423                 0xf00ff00f,
424                 0x10101010,
425                 0xf0f01010,
426                 0x30303030,
427                 0xa0a03030,
428                 0x50505050,
429                 0xc0c05050,
430                 0xf0f0f0f0,
431                 0x11111111,
432                 0x33333333,
433                 0x55555555,
434                 0x0000ffff,
435                 0x00ff00ff,
436                 0xff0000ff,
437                 0xffff00ff,
438                 0xffffffff,
439         };
440         struct i915_vma *scratch;
441         struct i915_vma *batch;
442         int err = 0, i, v;
443         u32 *cs, *results;
444
445         scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1);
446         if (IS_ERR(scratch))
447                 return PTR_ERR(scratch);
448
449         batch = create_batch(ctx);
450         if (IS_ERR(batch)) {
451                 err = PTR_ERR(batch);
452                 goto out_scratch;
453         }
454
455         for (i = 0; i < engine->whitelist.count; i++) {
456                 u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
457                 u64 addr = scratch->node.start;
458                 struct i915_request *rq;
459                 u32 srm, lrm, rsvd;
460                 u32 expect;
461                 int idx;
462
463                 if (wo_register(engine, reg))
464                         continue;
465
466                 srm = MI_STORE_REGISTER_MEM;
467                 lrm = MI_LOAD_REGISTER_MEM;
468                 if (INTEL_GEN(ctx->i915) >= 8)
469                         lrm++, srm++;
470
471                 pr_debug("%s: Writing garbage to %x\n",
472                          engine->name, reg);
473
474                 cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
475                 if (IS_ERR(cs)) {
476                         err = PTR_ERR(cs);
477                         goto out_batch;
478                 }
479
480                 /* SRM original */
481                 *cs++ = srm;
482                 *cs++ = reg;
483                 *cs++ = lower_32_bits(addr);
484                 *cs++ = upper_32_bits(addr);
485
486                 idx = 1;
487                 for (v = 0; v < ARRAY_SIZE(values); v++) {
488                         /* LRI garbage */
489                         *cs++ = MI_LOAD_REGISTER_IMM(1);
490                         *cs++ = reg;
491                         *cs++ = values[v];
492
493                         /* SRM result */
494                         *cs++ = srm;
495                         *cs++ = reg;
496                         *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
497                         *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
498                         idx++;
499                 }
500                 for (v = 0; v < ARRAY_SIZE(values); v++) {
501                         /* LRI garbage */
502                         *cs++ = MI_LOAD_REGISTER_IMM(1);
503                         *cs++ = reg;
504                         *cs++ = ~values[v];
505
506                         /* SRM result */
507                         *cs++ = srm;
508                         *cs++ = reg;
509                         *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
510                         *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
511                         idx++;
512                 }
513                 GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
514
515                 /* LRM original -- don't leave garbage in the context! */
516                 *cs++ = lrm;
517                 *cs++ = reg;
518                 *cs++ = lower_32_bits(addr);
519                 *cs++ = upper_32_bits(addr);
520
521                 *cs++ = MI_BATCH_BUFFER_END;
522
523                 i915_gem_object_flush_map(batch->obj);
524                 i915_gem_object_unpin_map(batch->obj);
525                 i915_gem_chipset_flush(ctx->i915);
526
527                 rq = igt_request_alloc(ctx, engine);
528                 if (IS_ERR(rq)) {
529                         err = PTR_ERR(rq);
530                         goto out_batch;
531                 }
532
533                 if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
534                         err = engine->emit_init_breadcrumb(rq);
535                         if (err)
536                                 goto err_request;
537                 }
538
539                 err = engine->emit_bb_start(rq,
540                                             batch->node.start, PAGE_SIZE,
541                                             0);
542                 if (err)
543                         goto err_request;
544
545 err_request:
546                 i915_request_add(rq);
547                 if (err)
548                         goto out_batch;
549
550                 if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
551                         pr_err("%s: Futzing %x timed out; cancelling test\n",
552                                engine->name, reg);
553                         i915_gem_set_wedged(ctx->i915);
554                         err = -EIO;
555                         goto out_batch;
556                 }
557
558                 results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
559                 if (IS_ERR(results)) {
560                         err = PTR_ERR(results);
561                         goto out_batch;
562                 }
563
564                 GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
565                 rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
566                 if (!rsvd) {
567                         pr_err("%s: Unable to write to whitelisted register %x\n",
568                                engine->name, reg);
569                         err = -EINVAL;
570                         goto out_unpin;
571                 }
572
573                 expect = results[0];
574                 idx = 1;
575                 for (v = 0; v < ARRAY_SIZE(values); v++) {
576                         expect = reg_write(expect, values[v], rsvd);
577                         if (results[idx] != expect)
578                                 err++;
579                         idx++;
580                 }
581                 for (v = 0; v < ARRAY_SIZE(values); v++) {
582                         expect = reg_write(expect, ~values[v], rsvd);
583                         if (results[idx] != expect)
584                                 err++;
585                         idx++;
586                 }
587                 if (err) {
588                         pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
589                                engine->name, err, reg);
590
591                         pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
592                                 engine->name, reg, results[0], rsvd);
593
594                         expect = results[0];
595                         idx = 1;
596                         for (v = 0; v < ARRAY_SIZE(values); v++) {
597                                 u32 w = values[v];
598
599                                 expect = reg_write(expect, w, rsvd);
600                                 pr_info("Wrote %08x, read %08x, expect %08x\n",
601                                         w, results[idx], expect);
602                                 idx++;
603                         }
604                         for (v = 0; v < ARRAY_SIZE(values); v++) {
605                                 u32 w = ~values[v];
606
607                                 expect = reg_write(expect, w, rsvd);
608                                 pr_info("Wrote %08x, read %08x, expect %08x\n",
609                                         w, results[idx], expect);
610                                 idx++;
611                         }
612
613                         err = -EINVAL;
614                 }
615 out_unpin:
616                 i915_gem_object_unpin_map(scratch->obj);
617                 if (err)
618                         break;
619         }
620
621         if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
622                 err = -EIO;
623 out_batch:
624         i915_vma_unpin_and_release(&batch, 0);
625 out_scratch:
626         i915_vma_unpin_and_release(&scratch, 0);
627         return err;
628 }
629
630 static int live_dirty_whitelist(void *arg)
631 {
632         struct drm_i915_private *i915 = arg;
633         struct intel_engine_cs *engine;
634         struct i915_gem_context *ctx;
635         enum intel_engine_id id;
636         intel_wakeref_t wakeref;
637         struct drm_file *file;
638         int err = 0;
639
640         /* Can the user write to the whitelisted registers? */
641
642         if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
643                 return 0;
644
645         wakeref = intel_runtime_pm_get(i915);
646
647         mutex_unlock(&i915->drm.struct_mutex);
648         file = mock_file(i915);
649         mutex_lock(&i915->drm.struct_mutex);
650         if (IS_ERR(file)) {
651                 err = PTR_ERR(file);
652                 goto out_rpm;
653         }
654
655         ctx = live_context(i915, file);
656         if (IS_ERR(ctx)) {
657                 err = PTR_ERR(ctx);
658                 goto out_file;
659         }
660
661         for_each_engine(engine, i915, id) {
662                 if (engine->whitelist.count == 0)
663                         continue;
664
665                 err = check_dirty_whitelist(ctx, engine);
666                 if (err)
667                         goto out_file;
668         }
669
670 out_file:
671         mutex_unlock(&i915->drm.struct_mutex);
672         mock_file_free(i915, file);
673         mutex_lock(&i915->drm.struct_mutex);
674 out_rpm:
675         intel_runtime_pm_put(i915, wakeref);
676         return err;
677 }
678
679 static int live_reset_whitelist(void *arg)
680 {
681         struct drm_i915_private *i915 = arg;
682         struct intel_engine_cs *engine = i915->engine[RCS0];
683         int err = 0;
684
685         /* If we reset the gpu, we should not lose the RING_NONPRIV */
686
687         if (!engine || engine->whitelist.count == 0)
688                 return 0;
689
690         igt_global_reset_lock(i915);
691
692         if (intel_has_reset_engine(i915)) {
693                 err = check_whitelist_across_reset(engine,
694                                                    do_engine_reset,
695                                                    "engine");
696                 if (err)
697                         goto out;
698         }
699
700         if (intel_has_gpu_reset(i915)) {
701                 err = check_whitelist_across_reset(engine,
702                                                    do_device_reset,
703                                                    "device");
704                 if (err)
705                         goto out;
706         }
707
708 out:
709         igt_global_reset_unlock(i915);
710         return err;
711 }
712
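/*
 * Emit one MI_STORE_REGISTER_MEM per whitelisted register, dumping the
 * values seen by @ctx on @engine into the @results vma.
 */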
713 static int read_whitelisted_registers(struct i915_gem_context *ctx,
714                                       struct intel_engine_cs *engine,
715                                       struct i915_vma *results)
716 {
717         struct i915_request *rq;
718         int i, err = 0;
719         u32 srm, *cs;
720
721         rq = igt_request_alloc(ctx, engine);
722         if (IS_ERR(rq))
723                 return PTR_ERR(rq);
724
725         srm = MI_STORE_REGISTER_MEM;
726         if (INTEL_GEN(ctx->i915) >= 8)
727                 srm++;
728
729         cs = intel_ring_begin(rq, 4 * engine->whitelist.count);
730         if (IS_ERR(cs)) {
731                 err = PTR_ERR(cs);
732                 goto err_req;
733         }
734
735         for (i = 0; i < engine->whitelist.count; i++) {
736                 u64 offset = results->node.start + sizeof(u32) * i;
737
738                 *cs++ = srm;
739                 *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
740                 *cs++ = lower_32_bits(offset);
741                 *cs++ = upper_32_bits(offset);
742         }
743         intel_ring_advance(rq, cs);
744
745 err_req:
746         i915_request_add(rq);
747
748         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
749                 err = -EIO;
750
751         return err;
752 }
753
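/*
 * From an unprivileged user batch, write 0xffffffff into every
 * whitelisted register of @engine within @ctx.
 */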
754 static int scrub_whitelisted_registers(struct i915_gem_context *ctx,
755                                        struct intel_engine_cs *engine)
756 {
757         struct i915_request *rq;
758         struct i915_vma *batch;
759         int i, err = 0;
760         u32 *cs;
761
762         batch = create_batch(ctx);
763         if (IS_ERR(batch))
764                 return PTR_ERR(batch);
765
766         cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
767         if (IS_ERR(cs)) {
768                 err = PTR_ERR(cs);
769                 goto err_batch;
770         }
771
772         *cs++ = MI_LOAD_REGISTER_IMM(engine->whitelist.count);
773         for (i = 0; i < engine->whitelist.count; i++) {
774                 *cs++ = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
775                 *cs++ = 0xffffffff;
776         }
777         *cs++ = MI_BATCH_BUFFER_END;
778
779         i915_gem_object_flush_map(batch->obj);
780         i915_gem_chipset_flush(ctx->i915);
781
782         rq = igt_request_alloc(ctx, engine);
783         if (IS_ERR(rq)) {
784                 err = PTR_ERR(rq);
785                 goto err_unpin;
786         }
787
788         if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
789                 err = engine->emit_init_breadcrumb(rq);
790                 if (err)
791                         goto err_request;
792         }
793
794         /* Perform the writes from an unprivileged "user" batch */
795         err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
796
797 err_request:
798         i915_request_add(rq);
799         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0)
800                 err = -EIO;
801
802 err_unpin:
803         i915_gem_object_unpin_map(batch->obj);
804 err_batch:
805         i915_vma_unpin_and_release(&batch, 0);
806         return err;
807 }
808
809 struct regmask {
810         i915_reg_t reg;
811         unsigned long gen_mask;
812 };
813
814 static bool find_reg(struct drm_i915_private *i915,
815                      i915_reg_t reg,
816                      const struct regmask *tbl,
817                      unsigned long count)
818 {
819         u32 offset = i915_mmio_reg_offset(reg);
820
821         while (count--) {
822                 if (INTEL_INFO(i915)->gen_mask & tbl->gen_mask &&
823                     i915_mmio_reg_offset(tbl->reg) == offset)
824                         return true;
825                 tbl++;
826         }
827
828         return false;
829 }
830
831 static bool pardon_reg(struct drm_i915_private *i915, i915_reg_t reg)
832 {
833         /* Alas, we must pardon some whitelisted registers. Mistakes already made */
834         static const struct regmask pardon[] = {
835                 { GEN9_CTX_PREEMPT_REG, INTEL_GEN_MASK(9, 9) },
836                 { GEN8_L3SQCREG4, INTEL_GEN_MASK(9, 9) },
837         };
838
839         return find_reg(i915, reg, pardon, ARRAY_SIZE(pardon));
840 }
841
842 static bool result_eq(struct intel_engine_cs *engine,
843                       u32 a, u32 b, i915_reg_t reg)
844 {
845         if (a != b && !pardon_reg(engine->i915, reg)) {
846                 pr_err("Whitelisted register 0x%04x not context saved: A=%08x, B=%08x\n",
847                        i915_mmio_reg_offset(reg), a, b);
848                 return false;
849         }
850
851         return true;
852 }
853
854 static bool writeonly_reg(struct drm_i915_private *i915, i915_reg_t reg)
855 {
856         /* Some registers do not seem to behave and leave our writes unreadable */
857         static const struct regmask wo[] = {
858                 { GEN9_SLICE_COMMON_ECO_CHICKEN1, INTEL_GEN_MASK(9, 9) },
859         };
860
861         return find_reg(i915, reg, wo, ARRAY_SIZE(wo));
862 }
863
864 static bool result_neq(struct intel_engine_cs *engine,
865                        u32 a, u32 b, i915_reg_t reg)
866 {
867         if (a == b && !writeonly_reg(engine->i915, reg)) {
868                 pr_err("Whitelisted register 0x%04x:%08x was unwritable\n",
869                        i915_mmio_reg_offset(reg), a);
870                 return false;
871         }
872
873         return true;
874 }
875
876 static int
877 check_whitelisted_registers(struct intel_engine_cs *engine,
878                             struct i915_vma *A,
879                             struct i915_vma *B,
880                             bool (*fn)(struct intel_engine_cs *engine,
881                                        u32 a, u32 b,
882                                        i915_reg_t reg))
883 {
884         u32 *a, *b;
885         int i, err;
886
887         a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
888         if (IS_ERR(a))
889                 return PTR_ERR(a);
890
891         b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
892         if (IS_ERR(b)) {
893                 err = PTR_ERR(b);
894                 goto err_a;
895         }
896
897         err = 0;
898         for (i = 0; i < engine->whitelist.count; i++) {
899                 if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
900                         err = -EINVAL;
901         }
902
903         i915_gem_object_unpin_map(B->obj);
904 err_a:
905         i915_gem_object_unpin_map(A->obj);
906         return err;
907 }
908
909 static int live_isolated_whitelist(void *arg)
910 {
911         struct drm_i915_private *i915 = arg;
912         struct {
913                 struct i915_gem_context *ctx;
914                 struct i915_vma *scratch[2];
915         } client[2] = {};
916         struct intel_engine_cs *engine;
917         enum intel_engine_id id;
918         int i, err = 0;
919
920         /*
921          * Check that a write into a whitelisted register works, but is
922          * invisible to a second context.
923          */
924
925         if (!intel_engines_has_context_isolation(i915))
926                 return 0;
927
928         if (!i915->kernel_context->vm)
929                 return 0;
930
931         for (i = 0; i < ARRAY_SIZE(client); i++) {
932                 struct i915_gem_context *c;
933
934                 c = kernel_context(i915);
935                 if (IS_ERR(c)) {
936                         err = PTR_ERR(c);
937                         goto err;
938                 }
939
940                 client[i].scratch[0] = create_scratch(c->vm, 1024);
941                 if (IS_ERR(client[i].scratch[0])) {
942                         err = PTR_ERR(client[i].scratch[0]);
943                         kernel_context_close(c);
944                         goto err;
945                 }
946
947                 client[i].scratch[1] = create_scratch(c->vm, 1024);
948                 if (IS_ERR(client[i].scratch[1])) {
949                         err = PTR_ERR(client[i].scratch[1]);
950                         i915_vma_unpin_and_release(&client[i].scratch[0], 0);
951                         kernel_context_close(c);
952                         goto err;
953                 }
954
955                 client[i].ctx = c;
956         }
957
958         for_each_engine(engine, i915, id) {
959                 if (!engine->whitelist.count)
960                         continue;
961
962                 /* Read default values */
963                 err = read_whitelisted_registers(client[0].ctx, engine,
964                                                  client[0].scratch[0]);
965                 if (err)
966                         goto err;
967
968                 /* Try to overwrite registers (should only affect ctx0) */
969                 err = scrub_whitelisted_registers(client[0].ctx, engine);
970                 if (err)
971                         goto err;
972
973                 /* Read values from ctx1; we expect these to be the defaults */
974                 err = read_whitelisted_registers(client[1].ctx, engine,
975                                                  client[1].scratch[0]);
976                 if (err)
977                         goto err;
978
979                 /* Verify that both reads return the same default values */
980                 err = check_whitelisted_registers(engine,
981                                                   client[0].scratch[0],
982                                                   client[1].scratch[0],
983                                                   result_eq);
984                 if (err)
985                         goto err;
986
987                 /* Read back the updated values in ctx0 */
988                 err = read_whitelisted_registers(client[0].ctx, engine,
989                                                  client[0].scratch[1]);
990                 if (err)
991                         goto err;
992
993                 /* User should be granted privilege to overwrite regs */
994                 err = check_whitelisted_registers(engine,
995                                                   client[0].scratch[0],
996                                                   client[0].scratch[1],
997                                                   result_neq);
998                 if (err)
999                         goto err;
1000         }
1001
1002 err:
1003         for (i = 0; i < ARRAY_SIZE(client); i++) {
1004                 if (!client[i].ctx)
1005                         break;
1006
1007                 i915_vma_unpin_and_release(&client[i].scratch[1], 0);
1008                 i915_vma_unpin_and_release(&client[i].scratch[0], 0);
1009                 kernel_context_close(client[i].ctx);
1010         }
1011
1012         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1013                 err = -EIO;
1014
1015         return err;
1016 }
1017
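/*
 * Re-check the GT, engine and context workaround lists against the
 * hardware and report whether they all still match the references
 * captured by reference_lists_init().
 */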
1018 static bool
1019 verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists,
1020                 const char *str)
1021 {
1022         struct drm_i915_private *i915 = ctx->i915;
1023         struct i915_gem_engines_iter it;
1024         struct intel_context *ce;
1025         bool ok = true;
1026
1027         ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
1028
1029         for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
1030                 enum intel_engine_id id = ce->engine->id;
1031
1032                 ok &= engine_wa_list_verify(ce,
1033                                             &lists->engine[id].wa_list,
1034                                             str) == 0;
1035
1036                 ok &= engine_wa_list_verify(ce,
1037                                             &lists->engine[id].ctx_wa_list,
1038                                             str) == 0;
1039         }
1040         i915_gem_context_unlock_engines(ctx);
1041
1042         return ok;
1043 }
1044
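/*
 * Check that the GT and engine workarounds are still applied after a
 * full GPU reset.
 */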
1045 static int
1046 live_gpu_reset_workarounds(void *arg)
1047 {
1048         struct drm_i915_private *i915 = arg;
1049         struct i915_gem_context *ctx;
1050         intel_wakeref_t wakeref;
1051         struct wa_lists lists;
1052         bool ok;
1053
1054         if (!intel_has_gpu_reset(i915))
1055                 return 0;
1056
1057         ctx = kernel_context(i915);
1058         if (IS_ERR(ctx))
1059                 return PTR_ERR(ctx);
1060
1061         pr_info("Verifying after GPU reset...\n");
1062
1063         igt_global_reset_lock(i915);
1064         wakeref = intel_runtime_pm_get(i915);
1065
1066         reference_lists_init(i915, &lists);
1067
1068         ok = verify_wa_lists(ctx, &lists, "before reset");
1069         if (!ok)
1070                 goto out;
1071
1072         i915_reset(i915, ALL_ENGINES, "live_workarounds");
1073
1074         ok = verify_wa_lists(ctx, &lists, "after reset");
1075
1076 out:
1077         kernel_context_close(ctx);
1078         reference_lists_fini(i915, &lists);
1079         intel_runtime_pm_put(i915, wakeref);
1080         igt_global_reset_unlock(i915);
1081
1082         return ok ? 0 : -ESRCH;
1083 }
1084
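/*
 * Check that the workarounds survive a per-engine reset, both while the
 * engine is idle and while it is busy running a spinning batch.
 */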
1085 static int
1086 live_engine_reset_workarounds(void *arg)
1087 {
1088         struct drm_i915_private *i915 = arg;
1089         struct intel_engine_cs *engine;
1090         struct i915_gem_context *ctx;
1091         struct igt_spinner spin;
1092         enum intel_engine_id id;
1093         struct i915_request *rq;
1094         intel_wakeref_t wakeref;
1095         struct wa_lists lists;
1096         int ret = 0;
1097
1098         if (!intel_has_reset_engine(i915))
1099                 return 0;
1100
1101         ctx = kernel_context(i915);
1102         if (IS_ERR(ctx))
1103                 return PTR_ERR(ctx);
1104
1105         igt_global_reset_lock(i915);
1106         wakeref = intel_runtime_pm_get(i915);
1107
1108         reference_lists_init(i915, &lists);
1109
1110         for_each_engine(engine, i915, id) {
1111                 bool ok;
1112
1113                 pr_info("Verifying after %s reset...\n", engine->name);
1114
1115                 ok = verify_wa_lists(ctx, &lists, "before reset");
1116                 if (!ok) {
1117                         ret = -ESRCH;
1118                         goto err;
1119                 }
1120
1121                 i915_reset_engine(engine, "live_workarounds");
1122
1123                 ok = verify_wa_lists(ctx, &lists, "after idle reset");
1124                 if (!ok) {
1125                         ret = -ESRCH;
1126                         goto err;
1127                 }
1128
1129                 ret = igt_spinner_init(&spin, i915);
1130                 if (ret)
1131                         goto err;
1132
1133                 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
1134                 if (IS_ERR(rq)) {
1135                         ret = PTR_ERR(rq);
1136                         igt_spinner_fini(&spin);
1137                         goto err;
1138                 }
1139
1140                 i915_request_add(rq);
1141
1142                 if (!igt_wait_for_spinner(&spin, rq)) {
1143                         pr_err("Spinner failed to start\n");
1144                         igt_spinner_fini(&spin);
1145                         ret = -ETIMEDOUT;
1146                         goto err;
1147                 }
1148
1149                 i915_reset_engine(engine, "live_workarounds");
1150
1151                 igt_spinner_end(&spin);
1152                 igt_spinner_fini(&spin);
1153
1154                 ok = verify_wa_lists(ctx, &lists, "after busy reset");
1155                 if (!ok) {
1156                         ret = -ESRCH;
1157                         goto err;
1158                 }
1159         }
1160
1161 err:
1162         reference_lists_fini(i915, &lists);
1163         intel_runtime_pm_put(i915, wakeref);
1164         igt_global_reset_unlock(i915);
1165         kernel_context_close(ctx);
1166
1167         igt_flush_test(i915, I915_WAIT_LOCKED);
1168
1169         return ret;
1170 }
1171
1172 int intel_workarounds_live_selftests(struct drm_i915_private *i915)
1173 {
1174         static const struct i915_subtest tests[] = {
1175                 SUBTEST(live_dirty_whitelist),
1176                 SUBTEST(live_reset_whitelist),
1177                 SUBTEST(live_isolated_whitelist),
1178                 SUBTEST(live_gpu_reset_workarounds),
1179                 SUBTEST(live_engine_reset_workarounds),
1180         };
1181         int err;
1182
1183         if (i915_terminally_wedged(i915))
1184                 return 0;
1185
1186         mutex_lock(&i915->drm.struct_mutex);
1187         err = i915_subtests(tests, i915);
1188         mutex_unlock(&i915->drm.struct_mutex);
1189
1190         return err;
1191 }