1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2017 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_reset.h"
11 #include "i915_selftest.h"
12
13 #include "gem/selftests/igt_gem_utils.h"
14 #include "selftests/i915_random.h"
15 #include "selftests/igt_flush_test.h"
16 #include "selftests/igt_live_test.h"
17 #include "selftests/igt_reset.h"
18 #include "selftests/igt_spinner.h"
19 #include "selftests/mock_drm.h"
20 #include "selftests/mock_gem_device.h"
21
22 #include "huge_gem_object.h"
23 #include "igt_gem_utils.h"
24
25 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
26
27 static int live_nop_switch(void *arg)
28 {
29         const unsigned int nctx = 1024;
30         struct drm_i915_private *i915 = arg;
31         struct intel_engine_cs *engine;
32         struct i915_gem_context **ctx;
33         enum intel_engine_id id;
34         intel_wakeref_t wakeref;
35         struct igt_live_test t;
36         struct drm_file *file;
37         unsigned long n;
38         int err = -ENODEV;
39
40         /*
41          * Create as many contexts as we can feasibly get away with
42          * and check we can switch between them rapidly.
43          *
44          * Serves as a very simple stress test for submission and HW switching
45          * between contexts.
46          */
47
48         if (!DRIVER_CAPS(i915)->has_logical_contexts)
49                 return 0;
50
51         file = mock_file(i915);
52         if (IS_ERR(file))
53                 return PTR_ERR(file);
54
55         mutex_lock(&i915->drm.struct_mutex);
56         wakeref = intel_runtime_pm_get(i915);
57
58         ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
59         if (!ctx) {
60                 err = -ENOMEM;
61                 goto out_unlock;
62         }
63
64         for (n = 0; n < nctx; n++) {
65                 ctx[n] = live_context(i915, file);
66                 if (IS_ERR(ctx[n])) {
67                         err = PTR_ERR(ctx[n]);
68                         goto out_unlock;
69                 }
70         }
71
72         for_each_engine(engine, i915, id) {
73                 struct i915_request *rq;
74                 unsigned long end_time, prime;
75                 ktime_t times[2] = {};
76
77                 times[0] = ktime_get_raw();
78                 for (n = 0; n < nctx; n++) {
79                         rq = igt_request_alloc(ctx[n], engine);
80                         if (IS_ERR(rq)) {
81                                 err = PTR_ERR(rq);
82                                 goto out_unlock;
83                         }
84                         i915_request_add(rq);
85                 }
86                 if (i915_request_wait(rq,
87                                       I915_WAIT_LOCKED,
88                                       HZ / 5) < 0) {
89                         pr_err("Failed to populate %d contexts\n", nctx);
90                         i915_gem_set_wedged(i915);
91                         err = -EIO;
92                         goto out_unlock;
93                 }
94
95                 times[1] = ktime_get_raw();
96
97                 pr_info("Populated %d contexts on %s in %lluns\n",
98                         nctx, engine->name, ktime_to_ns(times[1] - times[0]));
99
100                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
101                 if (err)
102                         goto out_unlock;
103
104                 end_time = jiffies + i915_selftest.timeout_jiffies;
105                 for_each_prime_number_from(prime, 2, 8192) {
106                         times[1] = ktime_get_raw();
107
108                         for (n = 0; n < prime; n++) {
109                                 rq = igt_request_alloc(ctx[n % nctx], engine);
110                                 if (IS_ERR(rq)) {
111                                         err = PTR_ERR(rq);
112                                         goto out_unlock;
113                                 }
114
115                                 /*
116                                  * This space is left intentionally blank.
117                                  *
118                                  * We do not actually want to perform any
119                                  * action with this request, we just want
120                                  * to measure the latency in allocation
121                                  * and submission of our breadcrumbs -
122                                  * ensuring that the bare request is sufficient
123                                  * for the system to work (i.e. proper HEAD
124                                  * tracking of the rings, interrupt handling,
125                                  * etc). It also gives us a lower bound
126                                  * on latency.
127                                  */
128
129                                 i915_request_add(rq);
130                         }
131                         if (i915_request_wait(rq,
132                                               I915_WAIT_LOCKED,
133                                               HZ / 5) < 0) {
134                                 pr_err("Switching between %ld contexts timed out\n",
135                                        prime);
136                                 i915_gem_set_wedged(i915);
137                                 break;
138                         }
139
140                         times[1] = ktime_sub(ktime_get_raw(), times[1]);
141                         if (prime == 2)
142                                 times[0] = times[1];
143
144                         if (__igt_timeout(end_time, NULL))
145                                 break;
146                 }
147
148                 err = igt_live_test_end(&t);
149                 if (err)
150                         goto out_unlock;
151
152                 pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
153                         engine->name,
154                         ktime_to_ns(times[0]),
155                         prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
156         }
157
158 out_unlock:
159         intel_runtime_pm_put(i915, wakeref);
160         mutex_unlock(&i915->drm.struct_mutex);
161         mock_file_free(i915, file);
162         return err;
163 }
164
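    /*
     * Build a batch of MI_STORE_DWORD_IMM commands (using the encoding
     * appropriate for the gen) that writes @value into one dword per page:
     * @count stores starting at @offset within @vma and stepping by
     * PAGE_SIZE. The batch is pinned into the same address space and
     * returned ready for submission.
     */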
165 static struct i915_vma *
166 gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
167 {
168         struct drm_i915_gem_object *obj;
169         const int gen = INTEL_GEN(vma->vm->i915);
170         unsigned long n, size;
171         u32 *cmd;
172         int err;
173
174         size = (4 * count + 1) * sizeof(u32);
175         size = round_up(size, PAGE_SIZE);
176         obj = i915_gem_object_create_internal(vma->vm->i915, size);
177         if (IS_ERR(obj))
178                 return ERR_CAST(obj);
179
180         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
181         if (IS_ERR(cmd)) {
182                 err = PTR_ERR(cmd);
183                 goto err;
184         }
185
186         GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
187         offset += vma->node.start;
188
189         for (n = 0; n < count; n++) {
190                 if (gen >= 8) {
191                         *cmd++ = MI_STORE_DWORD_IMM_GEN4;
192                         *cmd++ = lower_32_bits(offset);
193                         *cmd++ = upper_32_bits(offset);
194                         *cmd++ = value;
195                 } else if (gen >= 4) {
196                         *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
197                                 (gen < 6 ? MI_USE_GGTT : 0);
198                         *cmd++ = 0;
199                         *cmd++ = offset;
200                         *cmd++ = value;
201                 } else {
202                         *cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
203                         *cmd++ = offset;
204                         *cmd++ = value;
205                 }
206                 offset += PAGE_SIZE;
207         }
208         *cmd = MI_BATCH_BUFFER_END;
209         i915_gem_object_flush_map(obj);
210         i915_gem_object_unpin_map(obj);
211
212         i915_gem_object_lock(obj);
213         err = i915_gem_object_set_to_gtt_domain(obj, false);
214         i915_gem_object_unlock(obj);
215         if (err)
216                 goto err;
217
218         vma = i915_vma_instance(obj, vma->vm, NULL);
219         if (IS_ERR(vma)) {
220                 err = PTR_ERR(vma);
221                 goto err;
222         }
223
224         err = i915_vma_pin(vma, 0, 0, PIN_USER);
225         if (err)
226                 goto err;
227
228         return vma;
229
230 err:
231         i915_gem_object_put(obj);
232         return ERR_PTR(err);
233 }
234
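    /*
     * A huge_gem_object provides a small pool of real (physical) pages that
     * is mapped repeatedly to fake a much larger object: real_page_count()
     * reports the backing pages, fake_page_count() the GTT-visible size.
     */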
235 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
236 {
237         return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
238 }
239
240 static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
241 {
242         return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
243 }
244
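    /*
     * Use the GPU to write the value @dw into the @dw'th dword of every
     * real page of @obj: bind @obj into the context's address space and
     * submit a gpu_fill_dw() batch on @engine in @ctx.
     */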
245 static int gpu_fill(struct drm_i915_gem_object *obj,
246                     struct i915_gem_context *ctx,
247                     struct intel_engine_cs *engine,
248                     unsigned int dw)
249 {
250         struct drm_i915_private *i915 = to_i915(obj->base.dev);
251         struct i915_address_space *vm =
252                 ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
253         struct i915_request *rq;
254         struct i915_vma *vma;
255         struct i915_vma *batch;
256         unsigned int flags;
257         int err;
258
259         GEM_BUG_ON(obj->base.size > vm->total);
260         GEM_BUG_ON(!intel_engine_can_store_dword(engine));
261
262         vma = i915_vma_instance(obj, vm, NULL);
263         if (IS_ERR(vma))
264                 return PTR_ERR(vma);
265
266         i915_gem_object_lock(obj);
267         err = i915_gem_object_set_to_gtt_domain(obj, false);
268         i915_gem_object_unlock(obj);
269         if (err)
270                 return err;
271
272         err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
273         if (err)
274                 return err;
275
276         /* Within the GTT the huge object maps every page onto
277          * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
278          * We set the nth dword within the page using the nth
279          * mapping via the GTT - this should exercise the GTT mapping
280          * whilst checking that each context provides a unique view
281          * into the object.
282          */
283         batch = gpu_fill_dw(vma,
284                             (dw * real_page_count(obj)) << PAGE_SHIFT |
285                             (dw * sizeof(u32)),
286                             real_page_count(obj),
287                             dw);
288         if (IS_ERR(batch)) {
289                 err = PTR_ERR(batch);
290                 goto err_vma;
291         }
292
293         rq = igt_request_alloc(ctx, engine);
294         if (IS_ERR(rq)) {
295                 err = PTR_ERR(rq);
296                 goto err_batch;
297         }
298
299         flags = 0;
300         if (INTEL_GEN(vm->i915) <= 5)
301                 flags |= I915_DISPATCH_SECURE;
302
303         err = engine->emit_bb_start(rq,
304                                     batch->node.start, batch->node.size,
305                                     flags);
306         if (err)
307                 goto err_request;
308
309         i915_vma_lock(batch);
310         err = i915_vma_move_to_active(batch, rq, 0);
311         i915_vma_unlock(batch);
312         if (err)
313                 goto skip_request;
314
315         i915_vma_lock(vma);
316         err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
317         i915_vma_unlock(vma);
318         if (err)
319                 goto skip_request;
320
321         i915_request_add(rq);
322
323         i915_vma_unpin(batch);
324         i915_vma_close(batch);
325         i915_vma_put(batch);
326
327         i915_vma_unpin(vma);
328
329         return 0;
330
331 skip_request:
332         i915_request_skip(rq, err);
333 err_request:
334         i915_request_add(rq);
335 err_batch:
336         i915_vma_unpin(batch);
337         i915_vma_put(batch);
338 err_vma:
339         i915_vma_unpin(vma);
340         return err;
341 }
342
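    /* Fill every dword of every real page of @obj with @value from the CPU. */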
343 static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
344 {
345         const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
346         unsigned int n, m, need_flush;
347         int err;
348
349         err = i915_gem_object_prepare_write(obj, &need_flush);
350         if (err)
351                 return err;
352
353         for (n = 0; n < real_page_count(obj); n++) {
354                 u32 *map;
355
356                 map = kmap_atomic(i915_gem_object_get_page(obj, n));
357                 for (m = 0; m < DW_PER_PAGE; m++)
358                         map[m] = value;
359                 if (!has_llc)
360                         drm_clflush_virt_range(map, PAGE_SIZE);
361                 kunmap_atomic(map);
362         }
363
364         i915_gem_object_finish_access(obj);
365         obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
366         obj->write_domain = 0;
367         return 0;
368 }
369
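    /*
     * Verify the object: in each real page the first @max dwords must hold
     * their own index (as written by gpu_fill()), and the remainder must
     * still hold the STACK_MAGIC value written by cpu_fill().
     */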
370 static noinline int cpu_check(struct drm_i915_gem_object *obj,
371                               unsigned int idx, unsigned int max)
372 {
373         unsigned int n, m, needs_flush;
374         int err;
375
376         err = i915_gem_object_prepare_read(obj, &needs_flush);
377         if (err)
378                 return err;
379
380         for (n = 0; n < real_page_count(obj); n++) {
381                 u32 *map;
382
383                 map = kmap_atomic(i915_gem_object_get_page(obj, n));
384                 if (needs_flush & CLFLUSH_BEFORE)
385                         drm_clflush_virt_range(map, PAGE_SIZE);
386
387                 for (m = 0; m < max; m++) {
388                         if (map[m] != m) {
389                                 pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
390                                        __builtin_return_address(0), idx,
391                                        n, real_page_count(obj), m, max,
392                                        map[m], m);
393                                 err = -EINVAL;
394                                 goto out_unmap;
395                         }
396                 }
397
398                 for (; m < DW_PER_PAGE; m++) {
399                         if (map[m] != STACK_MAGIC) {
400                                 pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
401                                        __builtin_return_address(0), idx, n, m,
402                                        map[m], STACK_MAGIC);
403                                 err = -EINVAL;
404                                 goto out_unmap;
405                         }
406                 }
407
408 out_unmap:
409                 kunmap_atomic(map);
410                 if (err)
411                         break;
412         }
413
414         i915_gem_object_finish_access(obj);
415         return err;
416 }
417
418 static int file_add_object(struct drm_file *file,
419                             struct drm_i915_gem_object *obj)
420 {
421         int err;
422
423         GEM_BUG_ON(obj->base.handle_count);
424
425         /* tie the object to the drm_file for easy reaping */
426         err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
427         if (err < 0)
428                 return err;
429
430         i915_gem_object_get(obj);
431         obj->base.handle_count++;
432         return 0;
433 }
434
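    /*
     * Create a huge object spanning (at most half) the context's address
     * space, seed it with STACK_MAGIC on the CPU and track it on @objects
     * for the later cpu_check() pass.
     */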
435 static struct drm_i915_gem_object *
436 create_test_object(struct i915_gem_context *ctx,
437                    struct drm_file *file,
438                    struct list_head *objects)
439 {
440         struct drm_i915_gem_object *obj;
441         struct i915_address_space *vm =
442                 ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
443         u64 size;
444         int err;
445
446         size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
447         size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
448
449         obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
450         if (IS_ERR(obj))
451                 return obj;
452
453         err = file_add_object(file, obj);
454         i915_gem_object_put(obj);
455         if (err)
456                 return ERR_PTR(err);
457
458         err = cpu_fill(obj, STACK_MAGIC);
459         if (err) {
460                 pr_err("Failed to fill object with cpu, err=%d\n",
461                        err);
462                 return ERR_PTR(err);
463         }
464
465         list_add_tail(&obj->st_link, objects);
466         return obj;
467 }
468
469 static unsigned long max_dwords(struct drm_i915_gem_object *obj)
470 {
471         unsigned long npages = fake_page_count(obj);
472
473         GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
474         return npages / DW_PER_PAGE;
475 }
476
477 static int igt_ctx_exec(void *arg)
478 {
479         struct drm_i915_private *i915 = arg;
480         struct intel_engine_cs *engine;
481         enum intel_engine_id id;
482         int err = -ENODEV;
483
484         /*
485          * Create a few different contexts (with different mm) and write
486          * through each ctx/mm using the GPU, making sure those writes end
487          * up in the expected pages of our obj.
488          */
489
490         if (!DRIVER_CAPS(i915)->has_logical_contexts)
491                 return 0;
492
493         for_each_engine(engine, i915, id) {
494                 struct drm_i915_gem_object *obj = NULL;
495                 unsigned long ncontexts, ndwords, dw;
496                 struct igt_live_test t;
497                 struct drm_file *file;
498                 IGT_TIMEOUT(end_time);
499                 LIST_HEAD(objects);
500
501                 if (!intel_engine_can_store_dword(engine))
502                         continue;
503
504                 if (!engine->context_size)
505                         continue; /* No logical context support in HW */
506
507                 file = mock_file(i915);
508                 if (IS_ERR(file))
509                         return PTR_ERR(file);
510
511                 mutex_lock(&i915->drm.struct_mutex);
512
513                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
514                 if (err)
515                         goto out_unlock;
516
517                 ncontexts = 0;
518                 ndwords = 0;
519                 dw = 0;
520                 while (!time_after(jiffies, end_time)) {
521                         struct i915_gem_context *ctx;
522                         intel_wakeref_t wakeref;
523
524                         ctx = live_context(i915, file);
525                         if (IS_ERR(ctx)) {
526                                 err = PTR_ERR(ctx);
527                                 goto out_unlock;
528                         }
529
530                         if (!obj) {
531                                 obj = create_test_object(ctx, file, &objects);
532                                 if (IS_ERR(obj)) {
533                                         err = PTR_ERR(obj);
534                                         goto out_unlock;
535                                 }
536                         }
537
538                         with_intel_runtime_pm(i915, wakeref)
539                                 err = gpu_fill(obj, ctx, engine, dw);
540                         if (err) {
541                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
542                                        ndwords, dw, max_dwords(obj),
543                                        engine->name, ctx->hw_id,
544                                        yesno(!!ctx->ppgtt), err);
545                                 goto out_unlock;
546                         }
547
548                         if (++dw == max_dwords(obj)) {
549                                 obj = NULL;
550                                 dw = 0;
551                         }
552
553                         ndwords++;
554                         ncontexts++;
555                 }
556
557                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
558                         ncontexts, engine->name, ndwords);
559
560                 ncontexts = dw = 0;
561                 list_for_each_entry(obj, &objects, st_link) {
562                         unsigned int rem =
563                                 min_t(unsigned int, ndwords - dw, max_dwords(obj));
564
565                         err = cpu_check(obj, ncontexts++, rem);
566                         if (err)
567                                 break;
568
569                         dw += rem;
570                 }
571
572 out_unlock:
573                 if (igt_live_test_end(&t))
574                         err = -EIO;
575                 mutex_unlock(&i915->drm.struct_mutex);
576
577                 mock_file_free(i915, file);
578                 if (err)
579                         return err;
580         }
581
582         return 0;
583 }
584
585 static int igt_shared_ctx_exec(void *arg)
586 {
587         struct drm_i915_private *i915 = arg;
588         struct i915_gem_context *parent;
589         struct intel_engine_cs *engine;
590         enum intel_engine_id id;
591         struct igt_live_test t;
592         struct drm_file *file;
593         int err = 0;
594
595         /*
596          * Create a few different contexts with the same mm and write
597          * through each ctx using the GPU, making sure those writes end
598          * up in the expected pages of our obj.
599          */
600         if (!DRIVER_CAPS(i915)->has_logical_contexts)
601                 return 0;
602
603         file = mock_file(i915);
604         if (IS_ERR(file))
605                 return PTR_ERR(file);
606
607         mutex_lock(&i915->drm.struct_mutex);
608
609         parent = live_context(i915, file);
610         if (IS_ERR(parent)) {
611                 err = PTR_ERR(parent);
612                 goto out_unlock;
613         }
614
615         if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
616                 err = 0;
617                 goto out_unlock;
618         }
619
620         err = igt_live_test_begin(&t, i915, __func__, "");
621         if (err)
622                 goto out_unlock;
623
624         for_each_engine(engine, i915, id) {
625                 unsigned long ncontexts, ndwords, dw;
626                 struct drm_i915_gem_object *obj = NULL;
627                 IGT_TIMEOUT(end_time);
628                 LIST_HEAD(objects);
629
630                 if (!intel_engine_can_store_dword(engine))
631                         continue;
632
633                 dw = 0;
634                 ndwords = 0;
635                 ncontexts = 0;
636                 while (!time_after(jiffies, end_time)) {
637                         struct i915_gem_context *ctx;
638                         intel_wakeref_t wakeref;
639
640                         ctx = kernel_context(i915);
641                         if (IS_ERR(ctx)) {
642                                 err = PTR_ERR(ctx);
643                                 goto out_test;
644                         }
645
646                         __assign_ppgtt(ctx, parent->ppgtt);
647
648                         if (!obj) {
649                                 obj = create_test_object(parent, file, &objects);
650                                 if (IS_ERR(obj)) {
651                                         err = PTR_ERR(obj);
652                                         kernel_context_close(ctx);
653                                         goto out_test;
654                                 }
655                         }
656
657                         err = 0;
658                         with_intel_runtime_pm(i915, wakeref)
659                                 err = gpu_fill(obj, ctx, engine, dw);
660                         if (err) {
661                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
662                                        ndwords, dw, max_dwords(obj),
663                                        engine->name, ctx->hw_id,
664                                        yesno(!!ctx->ppgtt), err);
665                                 kernel_context_close(ctx);
666                                 goto out_test;
667                         }
668
669                         if (++dw == max_dwords(obj)) {
670                                 obj = NULL;
671                                 dw = 0;
672                         }
673
674                         ndwords++;
675                         ncontexts++;
676
677                         kernel_context_close(ctx);
678                 }
679                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
680                         ncontexts, engine->name, ndwords);
681
682                 ncontexts = dw = 0;
683                 list_for_each_entry(obj, &objects, st_link) {
684                         unsigned int rem =
685                                 min_t(unsigned int, ndwords - dw, max_dwords(obj));
686
687                         err = cpu_check(obj, ncontexts++, rem);
688                         if (err)
689                                 goto out_test;
690
691                         dw += rem;
692                 }
693         }
694 out_test:
695         if (igt_live_test_end(&t))
696                 err = -EIO;
697 out_unlock:
698         mutex_unlock(&i915->drm.struct_mutex);
699
700         mock_file_free(i915, file);
701         return err;
702 }
703
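    /*
     * Build a batch (gen8+ only) that copies the GEN8_R_PWR_CLK_STATE
     * register into the first dword of @vma using MI_STORE_REGISTER_MEM.
     */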
704 static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
705 {
706         struct drm_i915_gem_object *obj;
707         u32 *cmd;
708         int err;
709
710         if (INTEL_GEN(vma->vm->i915) < 8)
711                 return ERR_PTR(-EINVAL);
712
713         obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
714         if (IS_ERR(obj))
715                 return ERR_CAST(obj);
716
717         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
718         if (IS_ERR(cmd)) {
719                 err = PTR_ERR(cmd);
720                 goto err;
721         }
722
723         *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
724         *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
725         *cmd++ = lower_32_bits(vma->node.start);
726         *cmd++ = upper_32_bits(vma->node.start);
727         *cmd = MI_BATCH_BUFFER_END;
728
729         __i915_gem_object_flush_map(obj, 0, 64);
730         i915_gem_object_unpin_map(obj);
731
732         vma = i915_vma_instance(obj, vma->vm, NULL);
733         if (IS_ERR(vma)) {
734                 err = PTR_ERR(vma);
735                 goto err;
736         }
737
738         err = i915_vma_pin(vma, 0, 0, PIN_USER);
739         if (err)
740                 goto err;
741
742         return vma;
743
744 err:
745         i915_gem_object_put(obj);
746         return ERR_PTR(err);
747 }
748
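    /*
     * Submit an rpcs_query_batch() on @ce so that the HW writes its current
     * R_PWR_CLK_STATE into @obj; a reference to the request is returned via
     * @rq_out for the caller to wait upon.
     */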
749 static int
750 emit_rpcs_query(struct drm_i915_gem_object *obj,
751                 struct intel_context *ce,
752                 struct i915_request **rq_out)
753 {
754         struct i915_request *rq;
755         struct i915_vma *batch;
756         struct i915_vma *vma;
757         int err;
758
759         GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
760
761         vma = i915_vma_instance(obj, &ce->gem_context->ppgtt->vm, NULL);
762         if (IS_ERR(vma))
763                 return PTR_ERR(vma);
764
765         i915_gem_object_lock(obj);
766         err = i915_gem_object_set_to_gtt_domain(obj, false);
767         i915_gem_object_unlock(obj);
768         if (err)
769                 return err;
770
771         err = i915_vma_pin(vma, 0, 0, PIN_USER);
772         if (err)
773                 return err;
774
775         batch = rpcs_query_batch(vma);
776         if (IS_ERR(batch)) {
777                 err = PTR_ERR(batch);
778                 goto err_vma;
779         }
780
781         rq = i915_request_create(ce);
782         if (IS_ERR(rq)) {
783                 err = PTR_ERR(rq);
784                 goto err_batch;
785         }
786
787         err = rq->engine->emit_bb_start(rq,
788                                         batch->node.start, batch->node.size,
789                                         0);
790         if (err)
791                 goto err_request;
792
793         i915_vma_lock(batch);
794         err = i915_vma_move_to_active(batch, rq, 0);
795         i915_vma_unlock(batch);
796         if (err)
797                 goto skip_request;
798
799         i915_vma_lock(vma);
800         err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
801         i915_vma_unlock(vma);
802         if (err)
803                 goto skip_request;
804
805         i915_vma_unpin(batch);
806         i915_vma_close(batch);
807         i915_vma_put(batch);
808
809         i915_vma_unpin(vma);
810
811         *rq_out = i915_request_get(rq);
812
813         i915_request_add(rq);
814
815         return 0;
816
817 skip_request:
818         i915_request_skip(rq, err);
819 err_request:
820         i915_request_add(rq);
821 err_batch:
822         i915_vma_unpin(batch);
823         i915_vma_put(batch);
824 err_vma:
825         i915_vma_unpin(vma);
826
827         return err;
828 }
829
830 #define TEST_IDLE       BIT(0)
831 #define TEST_BUSY       BIT(1)
832 #define TEST_RESET      BIT(2)
833
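    /*
     * For the busy/reset phases, start a spinner on the context so that the
     * subsequent RPCS query is emitted while the engine is busy; otherwise
     * this is a no-op.
     */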
834 static int
835 __sseu_prepare(struct drm_i915_private *i915,
836                const char *name,
837                unsigned int flags,
838                struct intel_context *ce,
839                struct igt_spinner **spin)
840 {
841         struct i915_request *rq;
842         int ret;
843
844         *spin = NULL;
845         if (!(flags & (TEST_BUSY | TEST_RESET)))
846                 return 0;
847
848         *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
849         if (!*spin)
850                 return -ENOMEM;
851
852         ret = igt_spinner_init(*spin, i915);
853         if (ret)
854                 goto err_free;
855
856         rq = igt_spinner_create_request(*spin,
857                                         ce->gem_context,
858                                         ce->engine,
859                                         MI_NOOP);
860         if (IS_ERR(rq)) {
861                 ret = PTR_ERR(rq);
862                 goto err_fini;
863         }
864
865         i915_request_add(rq);
866
867         if (!igt_wait_for_spinner(*spin, rq)) {
868                 pr_err("%s: Spinner failed to start!\n", name);
869                 ret = -ETIMEDOUT;
870                 goto err_end;
871         }
872
873         return 0;
874
875 err_end:
876         igt_spinner_end(*spin);
877 err_fini:
878         igt_spinner_fini(*spin);
879 err_free:
880         kfree(fetch_and_zero(spin));
881         return ret;
882 }
883
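    /*
     * Emit an RPCS query on @ce, stop the spinner (if any), wait for the
     * result and decode the slice count from the register value read back;
     * the raw value is also returned via @rpcs.
     */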
884 static int
885 __read_slice_count(struct drm_i915_private *i915,
886                    struct intel_context *ce,
887                    struct drm_i915_gem_object *obj,
888                    struct igt_spinner *spin,
889                    u32 *rpcs)
890 {
891         struct i915_request *rq = NULL;
892         u32 s_mask, s_shift;
893         unsigned int cnt;
894         u32 *buf, val;
895         long ret;
896
897         ret = emit_rpcs_query(obj, ce, &rq);
898         if (ret)
899                 return ret;
900
901         if (spin)
902                 igt_spinner_end(spin);
903
904         ret = i915_request_wait(rq, I915_WAIT_LOCKED, MAX_SCHEDULE_TIMEOUT);
905         i915_request_put(rq);
906         if (ret < 0)
907                 return ret;
908
909         buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
910         if (IS_ERR(buf)) {
911                 ret = PTR_ERR(buf);
912                 return ret;
913         }
914
915         if (INTEL_GEN(i915) >= 11) {
916                 s_mask = GEN11_RPCS_S_CNT_MASK;
917                 s_shift = GEN11_RPCS_S_CNT_SHIFT;
918         } else {
919                 s_mask = GEN8_RPCS_S_CNT_MASK;
920                 s_shift = GEN8_RPCS_S_CNT_SHIFT;
921         }
922
923         val = *buf;
924         cnt = (val & s_mask) >> s_shift;
925         *rpcs = val;
926
927         i915_gem_object_unpin_map(obj);
928
929         return cnt;
930 }
931
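    /* Compare the slice count read back against the expected value. */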
932 static int
933 __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
934              const char *prefix, const char *suffix)
935 {
936         if (slices == expected)
937                 return 0;
938
939         if (slices < 0) {
940                 pr_err("%s: %s read slice count failed with %d%s\n",
941                        name, prefix, slices, suffix);
942                 return slices;
943         }
944
945         pr_err("%s: %s slice count %d is not %u%s\n",
946                name, prefix, slices, expected, suffix);
947
948         pr_info("RPCS=0x%x; %u%sx%u%s\n",
949                 rpcs, slices,
950                 (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
951                 (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
952                 (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
953
954         return -EINVAL;
955 }
956
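    /*
     * After reconfiguring, optionally reset the engine, then check that the
     * context reports the expected slice count while the kernel context
     * still reports the full set; for TEST_IDLE, re-check after idling.
     */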
957 static int
958 __sseu_finish(struct drm_i915_private *i915,
959               const char *name,
960               unsigned int flags,
961               struct intel_context *ce,
962               struct drm_i915_gem_object *obj,
963               unsigned int expected,
964               struct igt_spinner *spin)
965 {
966         unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
967         u32 rpcs = 0;
968         int ret = 0;
969
970         if (flags & TEST_RESET) {
971                 ret = i915_reset_engine(ce->engine, "sseu");
972                 if (ret)
973                         goto out;
974         }
975
976         ret = __read_slice_count(i915, ce, obj,
977                                  flags & TEST_RESET ? NULL : spin, &rpcs);
978         ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
979         if (ret)
980                 goto out;
981
982         ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
983                                  NULL, &rpcs);
984         ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
985
986 out:
987         if (spin)
988                 igt_spinner_end(spin);
989
990         if ((flags & TEST_IDLE) && ret == 0) {
991                 ret = i915_gem_wait_for_idle(i915,
992                                              I915_WAIT_LOCKED,
993                                              MAX_SCHEDULE_TIMEOUT);
994                 if (ret)
995                         return ret;
996
997                 ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
998                 ret = __check_rpcs(name, rpcs, ret, expected,
999                                    "Context", " after idle!");
1000         }
1001
1002         return ret;
1003 }
1004
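     /*
      * One reconfiguration step: prepare (optionally spinning), apply @sseu
      * to the context and verify the result.
      */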
1005 static int
1006 __sseu_test(struct drm_i915_private *i915,
1007             const char *name,
1008             unsigned int flags,
1009             struct intel_context *ce,
1010             struct drm_i915_gem_object *obj,
1011             struct intel_sseu sseu)
1012 {
1013         struct igt_spinner *spin = NULL;
1014         int ret;
1015
1016         ret = __sseu_prepare(i915, name, flags, ce, &spin);
1017         if (ret)
1018                 return ret;
1019
1020         ret = __intel_context_reconfigure_sseu(ce, sseu);
1021         if (ret)
1022                 goto out_spin;
1023
1024         ret = __sseu_finish(i915, name, flags, ce, obj,
1025                             hweight32(sseu.slice_mask), spin);
1026
1027 out_spin:
1028         if (spin) {
1029                 igt_spinner_end(spin);
1030                 igt_spinner_fini(spin);
1031                 kfree(spin);
1032         }
1033         return ret;
1034 }
1035
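     /*
      * Exercise dynamic SSEU reconfiguration on the render engine, toggling
      * between the full and a power-gated slice configuration under the
      * conditions requested by @flags (idle, busy, reset).
      */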
1036 static int
1037 __igt_ctx_sseu(struct drm_i915_private *i915,
1038                const char *name,
1039                unsigned int flags)
1040 {
1041         struct intel_engine_cs *engine = i915->engine[RCS0];
1042         struct intel_sseu default_sseu = engine->sseu;
1043         struct drm_i915_gem_object *obj;
1044         struct i915_gem_context *ctx;
1045         struct intel_context *ce;
1046         struct intel_sseu pg_sseu;
1047         intel_wakeref_t wakeref;
1048         struct drm_file *file;
1049         int ret;
1050
1051         if (INTEL_GEN(i915) < 9)
1052                 return 0;
1053
1054         if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
1055                 return 0;
1056
1057         if (hweight32(default_sseu.slice_mask) < 2)
1058                 return 0;
1059
1060         /*
1061          * Gen11 VME-friendly power-gated configuration with half the
1062          * sub-slices enabled.
1063          */
1064         pg_sseu = default_sseu;
1065         pg_sseu.slice_mask = 1;
1066         pg_sseu.subslice_mask =
1067                 ~(~0 << (hweight32(default_sseu.subslice_mask) / 2));
1068
1069         pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
1070                 name, flags, hweight32(default_sseu.slice_mask),
1071                 hweight32(pg_sseu.slice_mask));
1072
1073         file = mock_file(i915);
1074         if (IS_ERR(file))
1075                 return PTR_ERR(file);
1076
1077         if (flags & TEST_RESET)
1078                 igt_global_reset_lock(i915);
1079
1080         mutex_lock(&i915->drm.struct_mutex);
1081
1082         ctx = live_context(i915, file);
1083         if (IS_ERR(ctx)) {
1084                 ret = PTR_ERR(ctx);
1085                 goto out_unlock;
1086         }
1087         i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
1088
1089         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1090         if (IS_ERR(obj)) {
1091                 ret = PTR_ERR(obj);
1092                 goto out_unlock;
1093         }
1094
1095         wakeref = intel_runtime_pm_get(i915);
1096
1097         ce = i915_gem_context_get_engine(ctx, RCS0);
1098         if (IS_ERR(ce)) {
1099                 ret = PTR_ERR(ce);
1100                 goto out_rpm;
1101         }
1102
1103         ret = intel_context_pin(ce);
1104         if (ret)
1105                 goto out_context;
1106
1107         /* First set the default mask. */
1108         ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
1109         if (ret)
1110                 goto out_fail;
1111
1112         /* Then set a power-gated configuration. */
1113         ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
1114         if (ret)
1115                 goto out_fail;
1116
1117         /* Back to defaults. */
1118         ret = __sseu_test(i915, name, flags, ce, obj, default_sseu);
1119         if (ret)
1120                 goto out_fail;
1121
1122         /* One last power-gated configuration for the road. */
1123         ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
1124         if (ret)
1125                 goto out_fail;
1126
1127 out_fail:
1128         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1129                 ret = -EIO;
1130
1131         intel_context_unpin(ce);
1132 out_context:
1133         intel_context_put(ce);
1134 out_rpm:
1135         intel_runtime_pm_put(i915, wakeref);
1136         i915_gem_object_put(obj);
1137
1138 out_unlock:
1139         mutex_unlock(&i915->drm.struct_mutex);
1140
1141         if (flags & TEST_RESET)
1142                 igt_global_reset_unlock(i915);
1143
1144         mock_file_free(i915, file);
1145
1146         if (ret)
1147                 pr_err("%s: Failed with %d!\n", name, ret);
1148
1149         return ret;
1150 }
1151
1152 static int igt_ctx_sseu(void *arg)
1153 {
1154         struct {
1155                 const char *name;
1156                 unsigned int flags;
1157         } *phase, phases[] = {
1158                 { .name = "basic", .flags = 0 },
1159                 { .name = "idle", .flags = TEST_IDLE },
1160                 { .name = "busy", .flags = TEST_BUSY },
1161                 { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
1162                 { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
1163                 { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
1164         };
1165         unsigned int i;
1166         int ret = 0;
1167
1168         for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
1169              i++, phase++)
1170                 ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
1171
1172         return ret;
1173 }
1174
1175 static int igt_ctx_readonly(void *arg)
1176 {
1177         struct drm_i915_private *i915 = arg;
1178         struct drm_i915_gem_object *obj = NULL;
1179         struct i915_gem_context *ctx;
1180         struct i915_hw_ppgtt *ppgtt;
1181         unsigned long idx, ndwords, dw;
1182         struct igt_live_test t;
1183         struct drm_file *file;
1184         I915_RND_STATE(prng);
1185         IGT_TIMEOUT(end_time);
1186         LIST_HEAD(objects);
1187         int err = -ENODEV;
1188
1189         /*
1190          * Create a few read-only objects (with the occasional writable object)
1191          * and try to write into these objects, checking that the GPU discards
1192          * any write to a read-only object.
1193          */
1194
1195         file = mock_file(i915);
1196         if (IS_ERR(file))
1197                 return PTR_ERR(file);
1198
1199         mutex_lock(&i915->drm.struct_mutex);
1200
1201         err = igt_live_test_begin(&t, i915, __func__, "");
1202         if (err)
1203                 goto out_unlock;
1204
1205         ctx = live_context(i915, file);
1206         if (IS_ERR(ctx)) {
1207                 err = PTR_ERR(ctx);
1208                 goto out_unlock;
1209         }
1210
1211         ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
1212         if (!ppgtt || !ppgtt->vm.has_read_only) {
1213                 err = 0;
1214                 goto out_unlock;
1215         }
1216
1217         ndwords = 0;
1218         dw = 0;
1219         while (!time_after(jiffies, end_time)) {
1220                 struct intel_engine_cs *engine;
1221                 unsigned int id;
1222
1223                 for_each_engine(engine, i915, id) {
1224                         intel_wakeref_t wakeref;
1225
1226                         if (!intel_engine_can_store_dword(engine))
1227                                 continue;
1228
1229                         if (!obj) {
1230                                 obj = create_test_object(ctx, file, &objects);
1231                                 if (IS_ERR(obj)) {
1232                                         err = PTR_ERR(obj);
1233                                         goto out_unlock;
1234                                 }
1235
1236                                 if (prandom_u32_state(&prng) & 1)
1237                                         i915_gem_object_set_readonly(obj);
1238                         }
1239
1240                         err = 0;
1241                         with_intel_runtime_pm(i915, wakeref)
1242                                 err = gpu_fill(obj, ctx, engine, dw);
1243                         if (err) {
1244                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
1245                                        ndwords, dw, max_dwords(obj),
1246                                        engine->name, ctx->hw_id,
1247                                        yesno(!!ctx->ppgtt), err);
1248                                 goto out_unlock;
1249                         }
1250
1251                         if (++dw == max_dwords(obj)) {
1252                                 obj = NULL;
1253                                 dw = 0;
1254                         }
1255                         ndwords++;
1256                 }
1257         }
1258         pr_info("Submitted %lu dwords (across %u engines)\n",
1259                 ndwords, RUNTIME_INFO(i915)->num_engines);
1260
1261         dw = 0;
1262         idx = 0;
1263         list_for_each_entry(obj, &objects, st_link) {
1264                 unsigned int rem =
1265                         min_t(unsigned int, ndwords - dw, max_dwords(obj));
1266                 unsigned int num_writes;
1267
1268                 num_writes = rem;
1269                 if (i915_gem_object_is_readonly(obj))
1270                         num_writes = 0;
1271
1272                 err = cpu_check(obj, idx++, num_writes);
1273                 if (err)
1274                         break;
1275
1276                 dw += rem;
1277         }
1278
1279 out_unlock:
1280         if (igt_live_test_end(&t))
1281                 err = -EIO;
1282         mutex_unlock(&i915->drm.struct_mutex);
1283
1284         mock_file_free(i915, file);
1285         return err;
1286 }
1287
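     /*
      * Make sure the chosen scratch offset does not overlap an existing
      * allocation in the context's ppGTT.
      */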
1288 static int check_scratch(struct i915_gem_context *ctx, u64 offset)
1289 {
1290         struct drm_mm_node *node =
1291                 __drm_mm_interval_first(&ctx->ppgtt->vm.mm,
1292                                         offset, offset + sizeof(u32) - 1);
1293         if (!node || node->start > offset)
1294                 return 0;
1295
1296         GEM_BUG_ON(offset >= node->start + node->size);
1297
1298         pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
1299                upper_32_bits(offset), lower_32_bits(offset));
1300         return -EINVAL;
1301 }
1302
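     /*
      * Submit a small batch in @ctx that stores @value at ppGTT address
      * @offset using MI_STORE_DWORD_IMM.
      */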
1303 static int write_to_scratch(struct i915_gem_context *ctx,
1304                             struct intel_engine_cs *engine,
1305                             u64 offset, u32 value)
1306 {
1307         struct drm_i915_private *i915 = ctx->i915;
1308         struct drm_i915_gem_object *obj;
1309         struct i915_request *rq;
1310         struct i915_vma *vma;
1311         u32 *cmd;
1312         int err;
1313
1314         GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1315
1316         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1317         if (IS_ERR(obj))
1318                 return PTR_ERR(obj);
1319
1320         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1321         if (IS_ERR(cmd)) {
1322                 err = PTR_ERR(cmd);
1323                 goto err;
1324         }
1325
1326         *cmd++ = MI_STORE_DWORD_IMM_GEN4;
1327         if (INTEL_GEN(i915) >= 8) {
1328                 *cmd++ = lower_32_bits(offset);
1329                 *cmd++ = upper_32_bits(offset);
1330         } else {
1331                 *cmd++ = 0;
1332                 *cmd++ = offset;
1333         }
1334         *cmd++ = value;
1335         *cmd = MI_BATCH_BUFFER_END;
1336         __i915_gem_object_flush_map(obj, 0, 64);
1337         i915_gem_object_unpin_map(obj);
1338
1339         vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
1340         if (IS_ERR(vma)) {
1341                 err = PTR_ERR(vma);
1342                 goto err;
1343         }
1344
1345         err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1346         if (err)
1347                 goto err;
1348
1349         err = check_scratch(ctx, offset);
1350         if (err)
1351                 goto err_unpin;
1352
1353         rq = igt_request_alloc(ctx, engine);
1354         if (IS_ERR(rq)) {
1355                 err = PTR_ERR(rq);
1356                 goto err_unpin;
1357         }
1358
1359         err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1360         if (err)
1361                 goto err_request;
1362
1363         i915_vma_lock(vma);
1364         err = i915_vma_move_to_active(vma, rq, 0);
1365         i915_vma_unlock(vma);
1366         if (err)
1367                 goto skip_request;
1368
1369         i915_vma_unpin(vma);
1370         i915_vma_close(vma);
1371         i915_vma_put(vma);
1372
1373         i915_request_add(rq);
1374
1375         return 0;
1376
1377 skip_request:
1378         i915_request_skip(rq, err);
1379 err_request:
1380         i915_request_add(rq);
1381 err_unpin:
1382         i915_vma_unpin(vma);
1383 err:
1384         i915_gem_object_put(obj);
1385         return err;
1386 }
1387
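     /*
      * Submit a batch in @ctx that loads the dword at ppGTT address @offset
      * into a GPR and stores it back into the batch object, then read the
      * result back on the CPU into @value.
      */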
1388 static int read_from_scratch(struct i915_gem_context *ctx,
1389                              struct intel_engine_cs *engine,
1390                              u64 offset, u32 *value)
1391 {
1392         struct drm_i915_private *i915 = ctx->i915;
1393         struct drm_i915_gem_object *obj;
1394         const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
1395         const u32 result = 0x100;
1396         struct i915_request *rq;
1397         struct i915_vma *vma;
1398         u32 *cmd;
1399         int err;
1400
1401         GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1402
1403         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1404         if (IS_ERR(obj))
1405                 return PTR_ERR(obj);
1406
1407         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1408         if (IS_ERR(cmd)) {
1409                 err = PTR_ERR(cmd);
1410                 goto err;
1411         }
1412
1413         memset(cmd, POISON_INUSE, PAGE_SIZE);
1414         if (INTEL_GEN(i915) >= 8) {
1415                 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
1416                 *cmd++ = RCS_GPR0;
1417                 *cmd++ = lower_32_bits(offset);
1418                 *cmd++ = upper_32_bits(offset);
1419                 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
1420                 *cmd++ = RCS_GPR0;
1421                 *cmd++ = result;
1422                 *cmd++ = 0;
1423         } else {
1424                 *cmd++ = MI_LOAD_REGISTER_MEM;
1425                 *cmd++ = RCS_GPR0;
1426                 *cmd++ = offset;
1427                 *cmd++ = MI_STORE_REGISTER_MEM;
1428                 *cmd++ = RCS_GPR0;
1429                 *cmd++ = result;
1430         }
1431         *cmd = MI_BATCH_BUFFER_END;
1432
1433         i915_gem_object_flush_map(obj);
1434         i915_gem_object_unpin_map(obj);
1435
1436         vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
1437         if (IS_ERR(vma)) {
1438                 err = PTR_ERR(vma);
1439                 goto err;
1440         }
1441
1442         err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1443         if (err)
1444                 goto err;
1445
1446         err = check_scratch(ctx, offset);
1447         if (err)
1448                 goto err_unpin;
1449
1450         rq = igt_request_alloc(ctx, engine);
1451         if (IS_ERR(rq)) {
1452                 err = PTR_ERR(rq);
1453                 goto err_unpin;
1454         }
1455
1456         err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1457         if (err)
1458                 goto err_request;
1459
1460         i915_vma_lock(vma);
1461         err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1462         i915_vma_unlock(vma);
1463         if (err)
1464                 goto skip_request;
1465
1466         i915_vma_unpin(vma);
1467         i915_vma_close(vma);
1468
1469         i915_request_add(rq);
1470
1471         i915_gem_object_lock(obj);
1472         err = i915_gem_object_set_to_cpu_domain(obj, false);
1473         i915_gem_object_unlock(obj);
1474         if (err)
1475                 goto err;
1476
1477         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1478         if (IS_ERR(cmd)) {
1479                 err = PTR_ERR(cmd);
1480                 goto err;
1481         }
1482
1483         *value = cmd[result / sizeof(*cmd)];
1484         i915_gem_object_unpin_map(obj);
1485         i915_gem_object_put(obj);
1486
1487         return 0;
1488
1489 skip_request:
1490         i915_request_skip(rq, err);
1491 err_request:
1492         i915_request_add(rq);
1493 err_unpin:
1494         i915_vma_unpin(vma);
1495 err:
1496         i915_gem_object_put(obj);
1497         return err;
1498 }
1499
1500 static int igt_vm_isolation(void *arg)
1501 {
1502         struct drm_i915_private *i915 = arg;
1503         struct i915_gem_context *ctx_a, *ctx_b;
1504         struct intel_engine_cs *engine;
1505         intel_wakeref_t wakeref;
1506         struct igt_live_test t;
1507         struct drm_file *file;
1508         I915_RND_STATE(prng);
1509         unsigned long count;
1510         unsigned int id;
1511         u64 vm_total;
1512         int err;
1513
1514         if (INTEL_GEN(i915) < 7)
1515                 return 0;
1516
1517         /*
1518          * The simple goal here is that a write into one context is not
1519          * observed in a second (separate page tables and scratch).
1520          */
1521
1522         file = mock_file(i915);
1523         if (IS_ERR(file))
1524                 return PTR_ERR(file);
1525
1526         mutex_lock(&i915->drm.struct_mutex);
1527
1528         err = igt_live_test_begin(&t, i915, __func__, "");
1529         if (err)
1530                 goto out_unlock;
1531
1532         ctx_a = live_context(i915, file);
1533         if (IS_ERR(ctx_a)) {
1534                 err = PTR_ERR(ctx_a);
1535                 goto out_unlock;
1536         }
1537
1538         ctx_b = live_context(i915, file);
1539         if (IS_ERR(ctx_b)) {
1540                 err = PTR_ERR(ctx_b);
1541                 goto out_unlock;
1542         }
1543
1544         /* We can only test vm isolation if the vms are distinct */
1545         if (ctx_a->ppgtt == ctx_b->ppgtt)
1546                 goto out_unlock;
1547
1548         vm_total = ctx_a->ppgtt->vm.total;
1549         GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
1550         vm_total -= I915_GTT_PAGE_SIZE;
1551
1552         wakeref = intel_runtime_pm_get(i915);
1553
1554         count = 0;
1555         for_each_engine(engine, i915, id) {
1556                 IGT_TIMEOUT(end_time);
1557                 unsigned long this = 0;
1558
1559                 if (!intel_engine_can_store_dword(engine))
1560                         continue;
1561
1562                 while (!__igt_timeout(end_time, NULL)) {
1563                         u32 value = 0xc5c5c5c5;
1564                         u64 offset;
1565
1566                         div64_u64_rem(i915_prandom_u64_state(&prng),
1567                                       vm_total, &offset);
1568                         offset &= -sizeof(u32);
1569                         offset += I915_GTT_PAGE_SIZE;
1570
1571                         err = write_to_scratch(ctx_a, engine,
1572                                                offset, 0xdeadbeef);
1573                         if (err == 0)
1574                                 err = read_from_scratch(ctx_b, engine,
1575                                                         offset, &value);
1576                         if (err)
1577                                 goto out_rpm;
1578
1579                         if (value) {
1580                                 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1581                                        engine->name, value,
1582                                        upper_32_bits(offset),
1583                                        lower_32_bits(offset),
1584                                        this);
1585                                 err = -EINVAL;
1586                                 goto out_rpm;
1587                         }
1588
1589                         this++;
1590                 }
1591                 count += this;
1592         }
1593         pr_info("Checked %lu scratch offsets across %d engines\n",
1594                 count, RUNTIME_INFO(i915)->num_engines);
1595
1596 out_rpm:
1597         intel_runtime_pm_put(i915, wakeref);
1598 out_unlock:
1599         if (igt_live_test_end(&t))
1600                 err = -EIO;
1601         mutex_unlock(&i915->drm.struct_mutex);
1602
1603         mock_file_free(i915, file);
1604         return err;
1605 }
1606
1607 static __maybe_unused const char *
1608 __engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
1609 {
1610         struct intel_engine_cs *engine;
1611         intel_engine_mask_t tmp;
1612
1613         if (engines == ALL_ENGINES)
1614                 return "all";
1615
1616         for_each_engine_masked(engine, i915, engines, tmp)
1617                 return engine->name;
1618
1619         return "none";
1620 }
1621
1622 static void mock_barrier_task(void *data)
1623 {
1624         unsigned int *counter = data;
1625
1626         ++*counter;
1627 }
1628
1629 static int mock_context_barrier(void *arg)
1630 {
1631 #undef pr_fmt
1632 #define pr_fmt(x) "context_barrier_task():" # x
1633         struct drm_i915_private *i915 = arg;
1634         struct i915_gem_context *ctx;
1635         struct i915_request *rq;
1636         unsigned int counter;
1637         int err;
1638
1639         /*
1640          * The context barrier provides us with a callback after it emits
1641          * a request; useful for retiring old state after loading new.
1642          */
1643
1644         mutex_lock(&i915->drm.struct_mutex);
1645
1646         ctx = mock_context(i915, "mock");
1647         if (!ctx) {
1648                 err = -ENOMEM;
1649                 goto unlock;
1650         }
1651
1652         counter = 0;
1653         err = context_barrier_task(ctx, 0,
1654                                    NULL, mock_barrier_task, &counter);
1655         if (err) {
1656                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1657                 goto out;
1658         }
1659         if (counter == 0) {
1660                 pr_err("Did not retire immediately with 0 engines\n");
1661                 err = -EINVAL;
1662                 goto out;
1663         }
1664
1665         counter = 0;
1666         err = context_barrier_task(ctx, ALL_ENGINES,
1667                                    NULL, mock_barrier_task, &counter);
1668         if (err) {
1669                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1670                 goto out;
1671         }
1672         if (counter == 0) {
1673                 pr_err("Did not retire immediately for all unused engines\n");
1674                 err = -EINVAL;
1675                 goto out;
1676         }
1677
1678         rq = igt_request_alloc(ctx, i915->engine[RCS0]);
1679         if (IS_ERR(rq)) {
1680                 pr_err("Request allocation failed!\n");
                     err = PTR_ERR(rq);
1681                 goto out;
1682         }
1683         i915_request_add(rq);
1684
1685         counter = 0;
1686         context_barrier_inject_fault = BIT(RCS0);
1687         err = context_barrier_task(ctx, ALL_ENGINES,
1688                                    NULL, mock_barrier_task, &counter);
1689         context_barrier_inject_fault = 0;
1690         if (err == -ENXIO)
1691                 err = 0;
1692         else
1693                 pr_err("Did not hit fault injection!\n");
1694         if (counter != 0) {
1695                 pr_err("Invoked callback on error!\n");
1696                 err = -EIO;
1697         }
1698         if (err)
1699                 goto out;
1700
1701         counter = 0;
1702         err = context_barrier_task(ctx, ALL_ENGINES,
1703                                    NULL, mock_barrier_task, &counter);
1704         if (err) {
1705                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1706                 goto out;
1707         }
1708         mock_device_flush(i915);
1709         if (counter == 0) {
1710                 pr_err("Did not retire on each active engine\n");
1711                 err = -EINVAL;
1712                 goto out;
1713         }
1714
1715 out:
1716         mock_context_close(ctx);
1717 unlock:
1718         mutex_unlock(&i915->drm.struct_mutex);
1719         return err;
1720 #undef pr_fmt
1721 #define pr_fmt(x) x
1722 }
1723
1724 int i915_gem_context_mock_selftests(void)
1725 {
1726         static const struct i915_subtest tests[] = {
1727                 SUBTEST(mock_context_barrier),
1728         };
1729         struct drm_i915_private *i915;
1730         int err;
1731
1732         i915 = mock_gem_device();
1733         if (!i915)
1734                 return -ENOMEM;
1735
1736         err = i915_subtests(tests, i915);
1737
1738         drm_dev_put(&i915->drm);
1739         return err;
1740 }
1741
1742 int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
1743 {
1744         static const struct i915_subtest tests[] = {
1745                 SUBTEST(live_nop_switch),
1746                 SUBTEST(igt_ctx_exec),
1747                 SUBTEST(igt_ctx_readonly),
1748                 SUBTEST(igt_ctx_sseu),
1749                 SUBTEST(igt_shared_ctx_exec),
1750                 SUBTEST(igt_vm_isolation),
1751         };
1752
1753         if (i915_terminally_wedged(dev_priv))
1754                 return 0;
1755
1756         return i915_subtests(tests, dev_priv);
1757 }