drivers/gpu/drm/i915/selftests/intel_lrc.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "../i915_reset.h"
10
11 #include "../i915_selftest.h"
12 #include "igt_flush_test.h"
13 #include "igt_live_test.h"
14 #include "igt_spinner.h"
15 #include "i915_random.h"
16
17 #include "mock_context.h"
18
19 static int live_sanitycheck(void *arg)
20 {
21         struct drm_i915_private *i915 = arg;
22         struct intel_engine_cs *engine;
23         struct i915_gem_context *ctx;
24         enum intel_engine_id id;
25         struct igt_spinner spin;
26         intel_wakeref_t wakeref;
27         int err = -ENOMEM;
28
29         if (!HAS_LOGICAL_RING_CONTEXTS(i915))
30                 return 0;
31
32         mutex_lock(&i915->drm.struct_mutex);
33         wakeref = intel_runtime_pm_get(i915);
34
35         if (igt_spinner_init(&spin, i915))
36                 goto err_unlock;
37
38         ctx = kernel_context(i915);
39         if (!ctx)
40                 goto err_spin;
41
42         for_each_engine(engine, i915, id) {
43                 struct i915_request *rq;
44
45                 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
46                 if (IS_ERR(rq)) {
47                         err = PTR_ERR(rq);
48                         goto err_ctx;
49                 }
50
51                 i915_request_add(rq);
52                 if (!igt_wait_for_spinner(&spin, rq)) {
53                         GEM_TRACE("spinner failed to start\n");
54                         GEM_TRACE_DUMP();
55                         i915_gem_set_wedged(i915);
56                         err = -EIO;
57                         goto err_ctx;
58                 }
59
60                 igt_spinner_end(&spin);
61                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
62                         err = -EIO;
63                         goto err_ctx;
64                 }
65         }
66
67         err = 0;
68 err_ctx:
69         kernel_context_close(ctx);
70 err_spin:
71         igt_spinner_fini(&spin);
72 err_unlock:
73         igt_flush_test(i915, I915_WAIT_LOCKED);
74         intel_runtime_pm_put(i915, wakeref);
75         mutex_unlock(&i915->drm.struct_mutex);
76         return err;
77 }
78
79 static int live_busywait_preempt(void *arg)
80 {
81         struct drm_i915_private *i915 = arg;
82         struct i915_gem_context *ctx_hi, *ctx_lo;
83         struct intel_engine_cs *engine;
84         struct drm_i915_gem_object *obj;
85         struct i915_vma *vma;
86         enum intel_engine_id id;
87         intel_wakeref_t wakeref;
88         int err = -ENOMEM;
89         u32 *map;
90
91         /*
92          * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
93          * preempt the busywaits used to synchronise between rings.
94          */
95
96         mutex_lock(&i915->drm.struct_mutex);
97         wakeref = intel_runtime_pm_get(i915);
98
99         ctx_hi = kernel_context(i915);
100         if (!ctx_hi)
101                 goto err_unlock;
102         ctx_hi->sched.priority = INT_MAX;
103
104         ctx_lo = kernel_context(i915);
105         if (!ctx_lo)
106                 goto err_ctx_hi;
107         ctx_lo->sched.priority = INT_MIN;
108
109         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
110         if (IS_ERR(obj)) {
111                 err = PTR_ERR(obj);
112                 goto err_ctx_lo;
113         }
114
115         map = i915_gem_object_pin_map(obj, I915_MAP_WC);
116         if (IS_ERR(map)) {
117                 err = PTR_ERR(map);
118                 goto err_obj;
119         }
120
121         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
122         if (IS_ERR(vma)) {
123                 err = PTR_ERR(vma);
124                 goto err_map;
125         }
126
127         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
128         if (err)
129                 goto err_map;
130
131         for_each_engine(engine, i915, id) {
132                 struct i915_request *lo, *hi;
133                 struct igt_live_test t;
134                 u32 *cs;
135
136                 if (!intel_engine_can_store_dword(engine))
137                         continue;
138
139                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
140                         err = -EIO;
141                         goto err_vma;
142                 }
143
144                 /*
145                  * We create two requests. The low priority request
146                  * busywaits on a semaphore (inside the ringbuffer where
147          * it should be preemptible) and the high priority request
148          * uses an MI_STORE_DWORD_IMM to update the semaphore value
149                  * allowing the first request to complete. If preemption
150                  * fails, we hang instead.
151                  */
152
153                 lo = i915_request_alloc(engine, ctx_lo);
154                 if (IS_ERR(lo)) {
155                         err = PTR_ERR(lo);
156                         goto err_vma;
157                 }
158
159                 cs = intel_ring_begin(lo, 8);
160                 if (IS_ERR(cs)) {
161                         err = PTR_ERR(cs);
162                         i915_request_add(lo);
163                         goto err_vma;
164                 }
165
166                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
167                 *cs++ = i915_ggtt_offset(vma);
168                 *cs++ = 0;
169                 *cs++ = 1;
170
171                 /* XXX Do we need a flush + invalidate here? */
172
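                    /*
                     * Busywait inside the ring: MI_SEMAPHORE_POLL with
                     * SAD_EQ_SDD rereads the dword at the GGTT address until
                     * it matches the inline semaphore data (0). Only the
                     * high priority request's store below clears it.
                     */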
173                 *cs++ = MI_SEMAPHORE_WAIT |
174                         MI_SEMAPHORE_GLOBAL_GTT |
175                         MI_SEMAPHORE_POLL |
176                         MI_SEMAPHORE_SAD_EQ_SDD;
177                 *cs++ = 0;
178                 *cs++ = i915_ggtt_offset(vma);
179                 *cs++ = 0;
180
181                 intel_ring_advance(lo, cs);
182                 i915_request_add(lo);
183
184                 if (wait_for(READ_ONCE(*map), 10)) {
185                         err = -ETIMEDOUT;
186                         goto err_vma;
187                 }
188
189                 /* Low priority request should be busywaiting now */
190                 if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
191                         pr_err("%s: Busywaiting request did not busywait!\n",
192                                engine->name);
193                         err = -EIO;
194                         goto err_vma;
195                 }
196
197                 hi = i915_request_alloc(engine, ctx_hi);
198                 if (IS_ERR(hi)) {
199                         err = PTR_ERR(hi);
200                         goto err_vma;
201                 }
202
203                 cs = intel_ring_begin(hi, 4);
204                 if (IS_ERR(cs)) {
205                         err = PTR_ERR(cs);
206                         i915_request_add(hi);
207                         goto err_vma;
208                 }
209
210                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
211                 *cs++ = i915_ggtt_offset(vma);
212                 *cs++ = 0;
213                 *cs++ = 0;
214
215                 intel_ring_advance(hi, cs);
216                 i915_request_add(hi);
217
218                 if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
219                         struct drm_printer p = drm_info_printer(i915->drm.dev);
220
221                         pr_err("%s: Failed to preempt semaphore busywait!\n",
222                                engine->name);
223
224                         intel_engine_dump(engine, &p, "%s\n", engine->name);
225                         GEM_TRACE_DUMP();
226
227                         i915_gem_set_wedged(i915);
228                         err = -EIO;
229                         goto err_vma;
230                 }
231                 GEM_BUG_ON(READ_ONCE(*map));
232
233                 if (igt_live_test_end(&t)) {
234                         err = -EIO;
235                         goto err_vma;
236                 }
237         }
238
239         err = 0;
240 err_vma:
241         i915_vma_unpin(vma);
242 err_map:
243         i915_gem_object_unpin_map(obj);
244 err_obj:
245         i915_gem_object_put(obj);
246 err_ctx_lo:
247         kernel_context_close(ctx_lo);
248 err_ctx_hi:
249         kernel_context_close(ctx_hi);
250 err_unlock:
251         if (igt_flush_test(i915, I915_WAIT_LOCKED))
252                 err = -EIO;
253         intel_runtime_pm_put(i915, wakeref);
254         mutex_unlock(&i915->drm.struct_mutex);
255         return err;
256 }
257
258 static int live_preempt(void *arg)
259 {
260         struct drm_i915_private *i915 = arg;
261         struct i915_gem_context *ctx_hi, *ctx_lo;
262         struct igt_spinner spin_hi, spin_lo;
263         struct intel_engine_cs *engine;
264         enum intel_engine_id id;
265         intel_wakeref_t wakeref;
266         int err = -ENOMEM;
267
268         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
269                 return 0;
270
271         if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
272                 pr_err("Logical preemption supported, but not exposed\n");
273
274         mutex_lock(&i915->drm.struct_mutex);
275         wakeref = intel_runtime_pm_get(i915);
276
277         if (igt_spinner_init(&spin_hi, i915))
278                 goto err_unlock;
279
280         if (igt_spinner_init(&spin_lo, i915))
281                 goto err_spin_hi;
282
283         ctx_hi = kernel_context(i915);
284         if (!ctx_hi)
285                 goto err_spin_lo;
286         ctx_hi->sched.priority =
287                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
288
289         ctx_lo = kernel_context(i915);
290         if (!ctx_lo)
291                 goto err_ctx_hi;
292         ctx_lo->sched.priority =
293                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
294
295         for_each_engine(engine, i915, id) {
296                 struct igt_live_test t;
297                 struct i915_request *rq;
298
299                 if (!intel_engine_has_preemption(engine))
300                         continue;
301
302                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
303                         err = -EIO;
304                         goto err_ctx_lo;
305                 }
306
307                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
308                                                 MI_ARB_CHECK);
309                 if (IS_ERR(rq)) {
310                         err = PTR_ERR(rq);
311                         goto err_ctx_lo;
312                 }
313
314                 i915_request_add(rq);
315                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
316                         GEM_TRACE("lo spinner failed to start\n");
317                         GEM_TRACE_DUMP();
318                         i915_gem_set_wedged(i915);
319                         err = -EIO;
320                         goto err_ctx_lo;
321                 }
322
323                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
324                                                 MI_ARB_CHECK);
325                 if (IS_ERR(rq)) {
326                         igt_spinner_end(&spin_lo);
327                         err = PTR_ERR(rq);
328                         goto err_ctx_lo;
329                 }
330
331                 i915_request_add(rq);
332                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
333                         GEM_TRACE("hi spinner failed to start\n");
334                         GEM_TRACE_DUMP();
335                         i915_gem_set_wedged(i915);
336                         err = -EIO;
337                         goto err_ctx_lo;
338                 }
339
340                 igt_spinner_end(&spin_hi);
341                 igt_spinner_end(&spin_lo);
342
343                 if (igt_live_test_end(&t)) {
344                         err = -EIO;
345                         goto err_ctx_lo;
346                 }
347         }
348
349         err = 0;
350 err_ctx_lo:
351         kernel_context_close(ctx_lo);
352 err_ctx_hi:
353         kernel_context_close(ctx_hi);
354 err_spin_lo:
355         igt_spinner_fini(&spin_lo);
356 err_spin_hi:
357         igt_spinner_fini(&spin_hi);
358 err_unlock:
359         igt_flush_test(i915, I915_WAIT_LOCKED);
360         intel_runtime_pm_put(i915, wakeref);
361         mutex_unlock(&i915->drm.struct_mutex);
362         return err;
363 }
364
365 static int live_late_preempt(void *arg)
366 {
367         struct drm_i915_private *i915 = arg;
368         struct i915_gem_context *ctx_hi, *ctx_lo;
369         struct igt_spinner spin_hi, spin_lo;
370         struct intel_engine_cs *engine;
371         struct i915_sched_attr attr = {};
372         enum intel_engine_id id;
373         intel_wakeref_t wakeref;
374         int err = -ENOMEM;
375
376         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
377                 return 0;
378
379         mutex_lock(&i915->drm.struct_mutex);
380         wakeref = intel_runtime_pm_get(i915);
381
382         if (igt_spinner_init(&spin_hi, i915))
383                 goto err_unlock;
384
385         if (igt_spinner_init(&spin_lo, i915))
386                 goto err_spin_hi;
387
388         ctx_hi = kernel_context(i915);
389         if (!ctx_hi)
390                 goto err_spin_lo;
391
392         ctx_lo = kernel_context(i915);
393         if (!ctx_lo)
394                 goto err_ctx_hi;
395
396         for_each_engine(engine, i915, id) {
397                 struct igt_live_test t;
398                 struct i915_request *rq;
399
400                 if (!intel_engine_has_preemption(engine))
401                         continue;
402
403                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
404                         err = -EIO;
405                         goto err_ctx_lo;
406                 }
407
408                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
409                                                 MI_ARB_CHECK);
410                 if (IS_ERR(rq)) {
411                         err = PTR_ERR(rq);
412                         goto err_ctx_lo;
413                 }
414
415                 i915_request_add(rq);
416                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
417                         pr_err("First context failed to start\n");
418                         goto err_wedged;
419                 }
420
421                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
422                                                 MI_NOOP);
423                 if (IS_ERR(rq)) {
424                         igt_spinner_end(&spin_lo);
425                         err = PTR_ERR(rq);
426                         goto err_ctx_lo;
427                 }
428
429                 i915_request_add(rq);
430                 if (igt_wait_for_spinner(&spin_hi, rq)) {
431                         pr_err("Second context overtook first?\n");
432                         goto err_wedged;
433                 }
434
435                 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
436                 engine->schedule(rq, &attr);
437
438                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
439                         pr_err("High priority context failed to preempt the low priority context\n");
440                         GEM_TRACE_DUMP();
441                         goto err_wedged;
442                 }
443
444                 igt_spinner_end(&spin_hi);
445                 igt_spinner_end(&spin_lo);
446
447                 if (igt_live_test_end(&t)) {
448                         err = -EIO;
449                         goto err_ctx_lo;
450                 }
451         }
452
453         err = 0;
454 err_ctx_lo:
455         kernel_context_close(ctx_lo);
456 err_ctx_hi:
457         kernel_context_close(ctx_hi);
458 err_spin_lo:
459         igt_spinner_fini(&spin_lo);
460 err_spin_hi:
461         igt_spinner_fini(&spin_hi);
462 err_unlock:
463         igt_flush_test(i915, I915_WAIT_LOCKED);
464         intel_runtime_pm_put(i915, wakeref);
465         mutex_unlock(&i915->drm.struct_mutex);
466         return err;
467
468 err_wedged:
469         igt_spinner_end(&spin_hi);
470         igt_spinner_end(&spin_lo);
471         i915_gem_set_wedged(i915);
472         err = -EIO;
473         goto err_ctx_lo;
474 }
475
476 struct preempt_client {
477         struct igt_spinner spin;
478         struct i915_gem_context *ctx;
479 };
480
481 static int preempt_client_init(struct drm_i915_private *i915,
482                                struct preempt_client *c)
483 {
484         c->ctx = kernel_context(i915);
485         if (!c->ctx)
486                 return -ENOMEM;
487
488         if (igt_spinner_init(&c->spin, i915))
489                 goto err_ctx;
490
491         return 0;
492
493 err_ctx:
494         kernel_context_close(c->ctx);
495         return -ENOMEM;
496 }
497
498 static void preempt_client_fini(struct preempt_client *c)
499 {
500         igt_spinner_fini(&c->spin);
501         kernel_context_close(c->ctx);
502 }
503
504 static int live_suppress_self_preempt(void *arg)
505 {
506         struct drm_i915_private *i915 = arg;
507         struct intel_engine_cs *engine;
508         struct i915_sched_attr attr = {
509                 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
510         };
511         struct preempt_client a, b;
512         enum intel_engine_id id;
513         intel_wakeref_t wakeref;
514         int err = -ENOMEM;
515
516         /*
517          * Verify that if a preemption request does not cause a change in
518          * the current execution order, the preempt-to-idle injection is
519          * skipped and that we do not accidentally apply it after the CS
520          * completion event.
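             * Each preempt-to-idle injection is recorded in
             * engine->execlists.preempt_hang.count, which must remain zero
             * throughout this test.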
521          */
522
523         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
524                 return 0;
525
526         if (USES_GUC_SUBMISSION(i915))
527                 return 0; /* presume black box */
528
529         mutex_lock(&i915->drm.struct_mutex);
530         wakeref = intel_runtime_pm_get(i915);
531
532         if (preempt_client_init(i915, &a))
533                 goto err_unlock;
534         if (preempt_client_init(i915, &b))
535                 goto err_client_a;
536
537         for_each_engine(engine, i915, id) {
538                 struct i915_request *rq_a, *rq_b;
539                 int depth;
540
541                 if (!intel_engine_has_preemption(engine))
542                         continue;
543
544                 engine->execlists.preempt_hang.count = 0;
545
546                 rq_a = igt_spinner_create_request(&a.spin,
547                                                   a.ctx, engine,
548                                                   MI_NOOP);
549                 if (IS_ERR(rq_a)) {
550                         err = PTR_ERR(rq_a);
551                         goto err_client_b;
552                 }
553
554                 i915_request_add(rq_a);
555                 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
556                         pr_err("First client failed to start\n");
557                         goto err_wedged;
558                 }
559
560                 for (depth = 0; depth < 8; depth++) {
561                         rq_b = igt_spinner_create_request(&b.spin,
562                                                           b.ctx, engine,
563                                                           MI_NOOP);
564                         if (IS_ERR(rq_b)) {
565                                 err = PTR_ERR(rq_b);
566                                 goto err_client_b;
567                         }
568                         i915_request_add(rq_b);
569
570                         GEM_BUG_ON(i915_request_completed(rq_a));
571                         engine->schedule(rq_a, &attr);
572                         igt_spinner_end(&a.spin);
573
574                         if (!igt_wait_for_spinner(&b.spin, rq_b)) {
575                                 pr_err("Second client failed to start\n");
576                                 goto err_wedged;
577                         }
578
579                         swap(a, b);
580                         rq_a = rq_b;
581                 }
582                 igt_spinner_end(&a.spin);
583
584                 if (engine->execlists.preempt_hang.count) {
585                         pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
586                                engine->execlists.preempt_hang.count,
587                                depth);
588                         err = -EINVAL;
589                         goto err_client_b;
590                 }
591
592                 if (igt_flush_test(i915, I915_WAIT_LOCKED))
593                         goto err_wedged;
594         }
595
596         err = 0;
597 err_client_b:
598         preempt_client_fini(&b);
599 err_client_a:
600         preempt_client_fini(&a);
601 err_unlock:
602         if (igt_flush_test(i915, I915_WAIT_LOCKED))
603                 err = -EIO;
604         intel_runtime_pm_put(i915, wakeref);
605         mutex_unlock(&i915->drm.struct_mutex);
606         return err;
607
608 err_wedged:
609         igt_spinner_end(&b.spin);
610         igt_spinner_end(&a.spin);
611         i915_gem_set_wedged(i915);
612         err = -EIO;
613         goto err_client_b;
614 }
615
616 static int __i915_sw_fence_call
617 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
618 {
619         return NOTIFY_DONE;
620 }
621
622 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
623 {
624         struct i915_request *rq;
625
626         rq = kzalloc(sizeof(*rq), GFP_KERNEL);
627         if (!rq)
628                 return NULL;
629
630         INIT_LIST_HEAD(&rq->active_list);
631         rq->engine = engine;
632
633         i915_sched_node_init(&rq->sched);
634
635         /* mark this request as permanently incomplete */
636         rq->fence.seqno = 1;
637         BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
638         rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
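            /*
             * hwsp_seqno now points at the always-zero upper half of
             * fence.seqno, so *rq->hwsp_seqno reads 0 while fence.seqno is 1
             * and i915_request_completed() can never report the request done.
             */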
639         GEM_BUG_ON(i915_request_completed(rq));
640
641         i915_sw_fence_init(&rq->submit, dummy_notify);
642         i915_sw_fence_commit(&rq->submit);
643
644         return rq;
645 }
646
647 static void dummy_request_free(struct i915_request *dummy)
648 {
649         i915_request_mark_complete(dummy);
650         i915_sched_node_fini(&dummy->sched);
651         i915_sw_fence_fini(&dummy->submit);
652
653         dma_fence_free(&dummy->fence);
654 }
655
656 static int live_suppress_wait_preempt(void *arg)
657 {
658         struct drm_i915_private *i915 = arg;
659         struct preempt_client client[4];
660         struct intel_engine_cs *engine;
661         enum intel_engine_id id;
662         intel_wakeref_t wakeref;
663         int err = -ENOMEM;
664         int i;
665
666         /*
667          * Waiters are given a little priority nudge, but not enough
668          * to actually cause any preemption. Double check that we do
669          * not needlessly generate preempt-to-idle cycles.
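             * We submit spinners from four clients (ELSP[0], ELSP[1], head
             * of the queue and a bystander) and then wait on one of them
             * (varying with depth); no preempt-to-idle cycle should be
             * recorded for any of those waits.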
670          */
671
672         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
673                 return 0;
674
675         mutex_lock(&i915->drm.struct_mutex);
676         wakeref = intel_runtime_pm_get(i915);
677
678         if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
679                 goto err_unlock;
680         if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
681                 goto err_client_0;
682         if (preempt_client_init(i915, &client[2])) /* head of queue */
683                 goto err_client_1;
684         if (preempt_client_init(i915, &client[3])) /* bystander */
685                 goto err_client_2;
686
687         for_each_engine(engine, i915, id) {
688                 int depth;
689
690                 if (!intel_engine_has_preemption(engine))
691                         continue;
692
693                 if (!engine->emit_init_breadcrumb)
694                         continue;
695
696                 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
697                         struct i915_request *rq[ARRAY_SIZE(client)];
698                         struct i915_request *dummy;
699
700                         engine->execlists.preempt_hang.count = 0;
701
702                         dummy = dummy_request(engine);
703                         if (!dummy)
704                                 goto err_client_3;
705
706                         for (i = 0; i < ARRAY_SIZE(client); i++) {
707                                 rq[i] = igt_spinner_create_request(&client[i].spin,
708                                                                    client[i].ctx, engine,
709                                                                    MI_NOOP);
710                                 if (IS_ERR(rq[i])) {
711                                         err = PTR_ERR(rq[i]);
712                                         goto err_wedged;
713                                 }
714
715                                 /* Disable NEWCLIENT promotion */
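                                    /*
                                     * The dummy never completes, so the
                                     * timeline appears busy and these
                                     * requests are not treated as coming
                                     * from a fresh client.
                                     */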
716                                 __i915_active_request_set(&rq[i]->timeline->last_request,
717                                                           dummy);
718                                 i915_request_add(rq[i]);
719                         }
720
721                         dummy_request_free(dummy);
722
723                         GEM_BUG_ON(i915_request_completed(rq[0]));
724                         if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
725                                 pr_err("%s: First client failed to start\n",
726                                        engine->name);
727                                 goto err_wedged;
728                         }
729                         GEM_BUG_ON(!i915_request_started(rq[0]));
730
731                         if (i915_request_wait(rq[depth],
732                                               I915_WAIT_LOCKED |
733                                               I915_WAIT_PRIORITY,
734                                               1) != -ETIME) {
735                                 pr_err("%s: Waiter depth:%d completed!\n",
736                                        engine->name, depth);
737                                 goto err_wedged;
738                         }
739
740                         for (i = 0; i < ARRAY_SIZE(client); i++)
741                                 igt_spinner_end(&client[i].spin);
742
743                         if (igt_flush_test(i915, I915_WAIT_LOCKED))
744                                 goto err_wedged;
745
746                         if (engine->execlists.preempt_hang.count) {
747                                 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
748                                        engine->name,
749                                        engine->execlists.preempt_hang.count,
750                                        depth);
751                                 err = -EINVAL;
752                                 goto err_client_3;
753                         }
754                 }
755         }
756
757         err = 0;
758 err_client_3:
759         preempt_client_fini(&client[3]);
760 err_client_2:
761         preempt_client_fini(&client[2]);
762 err_client_1:
763         preempt_client_fini(&client[1]);
764 err_client_0:
765         preempt_client_fini(&client[0]);
766 err_unlock:
767         if (igt_flush_test(i915, I915_WAIT_LOCKED))
768                 err = -EIO;
769         intel_runtime_pm_put(i915, wakeref);
770         mutex_unlock(&i915->drm.struct_mutex);
771         return err;
772
773 err_wedged:
774         for (i = 0; i < ARRAY_SIZE(client); i++)
775                 igt_spinner_end(&client[i].spin);
776         i915_gem_set_wedged(i915);
777         err = -EIO;
778         goto err_client_3;
779 }
780
781 static int live_chain_preempt(void *arg)
782 {
783         struct drm_i915_private *i915 = arg;
784         struct intel_engine_cs *engine;
785         struct preempt_client hi, lo;
786         enum intel_engine_id id;
787         intel_wakeref_t wakeref;
788         int err = -ENOMEM;
789
790         /*
791          * Build a chain AB...BA between two contexts (A, B) and request
792          * preemption of the last request. It should then complete before
793          * the previously submitted spinner in B.
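             * The final request from the high priority context is boosted to
             * maximum priority and must jump over the whole chain of low
             * priority requests queued ahead of it.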
794          */
795
796         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
797                 return 0;
798
799         mutex_lock(&i915->drm.struct_mutex);
800         wakeref = intel_runtime_pm_get(i915);
801
802         if (preempt_client_init(i915, &hi))
803                 goto err_unlock;
804
805         if (preempt_client_init(i915, &lo))
806                 goto err_client_hi;
807
808         for_each_engine(engine, i915, id) {
809                 struct i915_sched_attr attr = {
810                         .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
811                 };
812                 struct igt_live_test t;
813                 struct i915_request *rq;
814                 int ring_size, count, i;
815
816                 if (!intel_engine_has_preemption(engine))
817                         continue;
818
819                 rq = igt_spinner_create_request(&lo.spin,
820                                                 lo.ctx, engine,
821                                                 MI_ARB_CHECK);
822                 if (IS_ERR(rq))
823                         goto err_wedged;
824                 i915_request_add(rq);
825
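                    /*
                     * Estimate how many requests fit in the ring: a single
                     * request occupies wa_tail - head bytes (modulo the ring
                     * size), so ring->size divided by that footprint bounds
                     * the longest chain we can queue without wrapping.
                     */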
826                 ring_size = rq->wa_tail - rq->head;
827                 if (ring_size < 0)
828                         ring_size += rq->ring->size;
829                 ring_size = rq->ring->size / ring_size;
830                 pr_debug("%s(%s): Using maximum of %d requests\n",
831                          __func__, engine->name, ring_size);
832
833                 igt_spinner_end(&lo.spin);
834                 if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
835                         pr_err("Timed out waiting to flush %s\n", engine->name);
836                         goto err_wedged;
837                 }
838
839                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
840                         err = -EIO;
841                         goto err_wedged;
842                 }
843
844                 for_each_prime_number_from(count, 1, ring_size) {
845                         rq = igt_spinner_create_request(&hi.spin,
846                                                         hi.ctx, engine,
847                                                         MI_ARB_CHECK);
848                         if (IS_ERR(rq))
849                                 goto err_wedged;
850                         i915_request_add(rq);
851                         if (!igt_wait_for_spinner(&hi.spin, rq))
852                                 goto err_wedged;
853
854                         rq = igt_spinner_create_request(&lo.spin,
855                                                         lo.ctx, engine,
856                                                         MI_ARB_CHECK);
857                         if (IS_ERR(rq))
858                                 goto err_wedged;
859                         i915_request_add(rq);
860
861                         for (i = 0; i < count; i++) {
862                                 rq = i915_request_alloc(engine, lo.ctx);
863                                 if (IS_ERR(rq))
864                                         goto err_wedged;
865                                 i915_request_add(rq);
866                         }
867
868                         rq = i915_request_alloc(engine, hi.ctx);
869                         if (IS_ERR(rq))
870                                 goto err_wedged;
871                         i915_request_add(rq);
872                         engine->schedule(rq, &attr);
873
874                         igt_spinner_end(&hi.spin);
875                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
876                                 struct drm_printer p =
877                                         drm_info_printer(i915->drm.dev);
878
879                                 pr_err("Failed to preempt over chain of %d\n",
880                                        count);
881                                 intel_engine_dump(engine, &p,
882                                                   "%s\n", engine->name);
883                                 goto err_wedged;
884                         }
885                         igt_spinner_end(&lo.spin);
886
887                         rq = i915_request_alloc(engine, lo.ctx);
888                         if (IS_ERR(rq))
889                                 goto err_wedged;
890                         i915_request_add(rq);
891                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
892                                 struct drm_printer p =
893                                         drm_info_printer(i915->drm.dev);
894
895                                 pr_err("Failed to flush low priority chain of %d requests\n",
896                                        count);
897                                 intel_engine_dump(engine, &p,
898                                                   "%s\n", engine->name);
899                                 goto err_wedged;
900                         }
901                 }
902
903                 if (igt_live_test_end(&t)) {
904                         err = -EIO;
905                         goto err_wedged;
906                 }
907         }
908
909         err = 0;
910 err_client_lo:
911         preempt_client_fini(&lo);
912 err_client_hi:
913         preempt_client_fini(&hi);
914 err_unlock:
915         if (igt_flush_test(i915, I915_WAIT_LOCKED))
916                 err = -EIO;
917         intel_runtime_pm_put(i915, wakeref);
918         mutex_unlock(&i915->drm.struct_mutex);
919         return err;
920
921 err_wedged:
922         igt_spinner_end(&hi.spin);
923         igt_spinner_end(&lo.spin);
924         i915_gem_set_wedged(i915);
925         err = -EIO;
926         goto err_client_lo;
927 }
928
929 static int live_preempt_hang(void *arg)
930 {
931         struct drm_i915_private *i915 = arg;
932         struct i915_gem_context *ctx_hi, *ctx_lo;
933         struct igt_spinner spin_hi, spin_lo;
934         struct intel_engine_cs *engine;
935         enum intel_engine_id id;
936         intel_wakeref_t wakeref;
937         int err = -ENOMEM;
938
939         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
940                 return 0;
941
942         if (!intel_has_reset_engine(i915))
943                 return 0;
944
945         mutex_lock(&i915->drm.struct_mutex);
946         wakeref = intel_runtime_pm_get(i915);
947
948         if (igt_spinner_init(&spin_hi, i915))
949                 goto err_unlock;
950
951         if (igt_spinner_init(&spin_lo, i915))
952                 goto err_spin_hi;
953
954         ctx_hi = kernel_context(i915);
955         if (!ctx_hi)
956                 goto err_spin_lo;
957         ctx_hi->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
958
959         ctx_lo = kernel_context(i915);
960         if (!ctx_lo)
961                 goto err_ctx_hi;
962         ctx_lo->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
963
964         for_each_engine(engine, i915, id) {
965                 struct i915_request *rq;
966
967                 if (!intel_engine_has_preemption(engine))
968                         continue;
969
970                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
971                                                 MI_ARB_CHECK);
972                 if (IS_ERR(rq)) {
973                         err = PTR_ERR(rq);
974                         goto err_ctx_lo;
975                 }
976
977                 i915_request_add(rq);
978                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
979                         GEM_TRACE("lo spinner failed to start\n");
980                         GEM_TRACE_DUMP();
981                         i915_gem_set_wedged(i915);
982                         err = -EIO;
983                         goto err_ctx_lo;
984                 }
985
986                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
987                                                 MI_ARB_CHECK);
988                 if (IS_ERR(rq)) {
989                         igt_spinner_end(&spin_lo);
990                         err = PTR_ERR(rq);
991                         goto err_ctx_lo;
992                 }
993
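                    /*
                     * With inject_hang set, the execlists backend signals
                     * preempt_hang.completion instead of completing the
                     * preempt-to-idle switch, leaving the engine stuck so
                     * that we can exercise recovery via engine reset.
                     */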
994                 init_completion(&engine->execlists.preempt_hang.completion);
995                 engine->execlists.preempt_hang.inject_hang = true;
996
997                 i915_request_add(rq);
998
999                 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1000                                                  HZ / 10)) {
1001                                 pr_err("Preemption did not occur within timeout!\n");
1002                         GEM_TRACE_DUMP();
1003                         i915_gem_set_wedged(i915);
1004                         err = -EIO;
1005                         goto err_ctx_lo;
1006                 }
1007
1008                 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1009                 i915_reset_engine(engine, NULL);
1010                 clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1011
1012                 engine->execlists.preempt_hang.inject_hang = false;
1013
1014                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1015                         GEM_TRACE("hi spinner failed to start\n");
1016                         GEM_TRACE_DUMP();
1017                         i915_gem_set_wedged(i915);
1018                         err = -EIO;
1019                         goto err_ctx_lo;
1020                 }
1021
1022                 igt_spinner_end(&spin_hi);
1023                 igt_spinner_end(&spin_lo);
1024                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1025                         err = -EIO;
1026                         goto err_ctx_lo;
1027                 }
1028         }
1029
1030         err = 0;
1031 err_ctx_lo:
1032         kernel_context_close(ctx_lo);
1033 err_ctx_hi:
1034         kernel_context_close(ctx_hi);
1035 err_spin_lo:
1036         igt_spinner_fini(&spin_lo);
1037 err_spin_hi:
1038         igt_spinner_fini(&spin_hi);
1039 err_unlock:
1040         igt_flush_test(i915, I915_WAIT_LOCKED);
1041         intel_runtime_pm_put(i915, wakeref);
1042         mutex_unlock(&i915->drm.struct_mutex);
1043         return err;
1044 }
1045
1046 static int random_range(struct rnd_state *rnd, int min, int max)
1047 {
1048         return i915_prandom_u32_max_state(max - min, rnd) + min;
1049 }
1050
1051 static int random_priority(struct rnd_state *rnd)
1052 {
1053         return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1054 }
1055
1056 struct preempt_smoke {
1057         struct drm_i915_private *i915;
1058         struct i915_gem_context **contexts;
1059         struct intel_engine_cs *engine;
1060         struct drm_i915_gem_object *batch;
1061         unsigned int ncontext;
1062         struct rnd_state prng;
1063         unsigned long count;
1064 };
1065
1066 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1067 {
1068         return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1069                                                           &smoke->prng)];
1070 }
1071
1072 static int smoke_submit(struct preempt_smoke *smoke,
1073                         struct i915_gem_context *ctx, int prio,
1074                         struct drm_i915_gem_object *batch)
1075 {
1076         struct i915_request *rq;
1077         struct i915_vma *vma = NULL;
1078         int err = 0;
1079
1080         if (batch) {
1081                 vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
1082                 if (IS_ERR(vma))
1083                         return PTR_ERR(vma);
1084
1085                 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1086                 if (err)
1087                         return err;
1088         }
1089
1090         ctx->sched.priority = prio;
1091
1092         rq = i915_request_alloc(smoke->engine, ctx);
1093         if (IS_ERR(rq)) {
1094                 err = PTR_ERR(rq);
1095                 goto unpin;
1096         }
1097
1098         if (vma) {
1099                 err = rq->engine->emit_bb_start(rq,
1100                                                 vma->node.start,
1101                                                 PAGE_SIZE, 0);
1102                 if (!err)
1103                         err = i915_vma_move_to_active(vma, rq, 0);
1104         }
1105
1106         i915_request_add(rq);
1107
1108 unpin:
1109         if (vma)
1110                 i915_vma_unpin(vma);
1111
1112         return err;
1113 }
1114
1115 static int smoke_crescendo_thread(void *arg)
1116 {
1117         struct preempt_smoke *smoke = arg;
1118         IGT_TIMEOUT(end_time);
1119         unsigned long count;
1120
1121         count = 0;
1122         do {
1123                 struct i915_gem_context *ctx = smoke_context(smoke);
1124                 int err;
1125
1126                 mutex_lock(&smoke->i915->drm.struct_mutex);
1127                 err = smoke_submit(smoke,
1128                                    ctx, count % I915_PRIORITY_MAX,
1129                                    smoke->batch);
1130                 mutex_unlock(&smoke->i915->drm.struct_mutex);
1131                 if (err)
1132                         return err;
1133
1134                 count++;
1135         } while (!__igt_timeout(end_time, NULL));
1136
1137         smoke->count = count;
1138         return 0;
1139 }
1140
1141 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1142 #define BATCH BIT(0)
1143 {
1144         struct task_struct *tsk[I915_NUM_ENGINES] = {};
1145         struct preempt_smoke arg[I915_NUM_ENGINES];
1146         struct intel_engine_cs *engine;
1147         enum intel_engine_id id;
1148         unsigned long count;
1149         int err = 0;
1150
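             /*
              * Drop struct_mutex so that each per-engine thread can take it
              * around its own submissions; every thread ramps its context
              * priority with each request (count % I915_PRIORITY_MAX).
              */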
1151         mutex_unlock(&smoke->i915->drm.struct_mutex);
1152
1153         for_each_engine(engine, smoke->i915, id) {
1154                 arg[id] = *smoke;
1155                 arg[id].engine = engine;
1156                 if (!(flags & BATCH))
1157                         arg[id].batch = NULL;
1158                 arg[id].count = 0;
1159
1160                 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1161                                       "igt/smoke:%d", id);
1162                 if (IS_ERR(tsk[id])) {
1163                         err = PTR_ERR(tsk[id]);
1164                         break;
1165                 }
1166                 get_task_struct(tsk[id]);
1167         }
1168
1169         count = 0;
1170         for_each_engine(engine, smoke->i915, id) {
1171                 int status;
1172
1173                 if (IS_ERR_OR_NULL(tsk[id]))
1174                         continue;
1175
1176                 status = kthread_stop(tsk[id]);
1177                 if (status && !err)
1178                         err = status;
1179
1180                 count += arg[id].count;
1181
1182                 put_task_struct(tsk[id]);
1183         }
1184
1185         mutex_lock(&smoke->i915->drm.struct_mutex);
1186
1187         pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1188                 count, flags,
1189                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1190         return err;
1191 }
1192
1193 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1194 {
1195         enum intel_engine_id id;
1196         IGT_TIMEOUT(end_time);
1197         unsigned long count;
1198
1199         count = 0;
1200         do {
1201                 for_each_engine(smoke->engine, smoke->i915, id) {
1202                         struct i915_gem_context *ctx = smoke_context(smoke);
1203                         int err;
1204
1205                         err = smoke_submit(smoke,
1206                                            ctx, random_priority(&smoke->prng),
1207                                            flags & BATCH ? smoke->batch : NULL);
1208                         if (err)
1209                                 return err;
1210
1211                         count++;
1212                 }
1213         } while (!__igt_timeout(end_time, NULL));
1214
1215         pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1216                 count, flags,
1217                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1218         return 0;
1219 }
1220
1221 static int live_preempt_smoke(void *arg)
1222 {
1223         struct preempt_smoke smoke = {
1224                 .i915 = arg,
1225                 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1226                 .ncontext = 1024,
1227         };
1228         const unsigned int phase[] = { 0, BATCH };
1229         intel_wakeref_t wakeref;
1230         struct igt_live_test t;
1231         int err = -ENOMEM;
1232         u32 *cs;
1233         int n;
1234
1235         if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1236                 return 0;
1237
1238         smoke.contexts = kmalloc_array(smoke.ncontext,
1239                                        sizeof(*smoke.contexts),
1240                                        GFP_KERNEL);
1241         if (!smoke.contexts)
1242                 return -ENOMEM;
1243
1244         mutex_lock(&smoke.i915->drm.struct_mutex);
1245         wakeref = intel_runtime_pm_get(smoke.i915);
1246
1247         smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1248         if (IS_ERR(smoke.batch)) {
1249                 err = PTR_ERR(smoke.batch);
1250                 goto err_unlock;
1251         }
1252
1253         cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1254         if (IS_ERR(cs)) {
1255                 err = PTR_ERR(cs);
1256                 goto err_batch;
1257         }
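             /*
              * Fill the batch with MI_ARB_CHECK arbitration points so it can
              * be preempted between any two instructions, and terminate it
              * with MI_BATCH_BUFFER_END.
              */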
1258         for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1259                 cs[n] = MI_ARB_CHECK;
1260         cs[n] = MI_BATCH_BUFFER_END;
1261         i915_gem_object_flush_map(smoke.batch);
1262         i915_gem_object_unpin_map(smoke.batch);
1263
1264         if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1265                 err = -EIO;
1266                 goto err_batch;
1267         }
1268
1269         for (n = 0; n < smoke.ncontext; n++) {
1270                 smoke.contexts[n] = kernel_context(smoke.i915);
1271                 if (!smoke.contexts[n])
1272                         goto err_ctx;
1273         }
1274
1275         for (n = 0; n < ARRAY_SIZE(phase); n++) {
1276                 err = smoke_crescendo(&smoke, phase[n]);
1277                 if (err)
1278                         goto err_ctx;
1279
1280                 err = smoke_random(&smoke, phase[n]);
1281                 if (err)
1282                         goto err_ctx;
1283         }
1284
1285 err_ctx:
1286         if (igt_live_test_end(&t))
1287                 err = -EIO;
1288
1289         for (n = 0; n < smoke.ncontext; n++) {
1290                 if (!smoke.contexts[n])
1291                         break;
1292                 kernel_context_close(smoke.contexts[n]);
1293         }
1294
1295 err_batch:
1296         i915_gem_object_put(smoke.batch);
1297 err_unlock:
1298         intel_runtime_pm_put(smoke.i915, wakeref);
1299         mutex_unlock(&smoke.i915->drm.struct_mutex);
1300         kfree(smoke.contexts);
1301
1302         return err;
1303 }
1304
1305 int intel_execlists_live_selftests(struct drm_i915_private *i915)
1306 {
1307         static const struct i915_subtest tests[] = {
1308                 SUBTEST(live_sanitycheck),
1309                 SUBTEST(live_busywait_preempt),
1310                 SUBTEST(live_preempt),
1311                 SUBTEST(live_late_preempt),
1312                 SUBTEST(live_suppress_self_preempt),
1313                 SUBTEST(live_suppress_wait_preempt),
1314                 SUBTEST(live_chain_preempt),
1315                 SUBTEST(live_preempt_hang),
1316                 SUBTEST(live_preempt_smoke),
1317         };
1318
1319         if (!HAS_EXECLISTS(i915))
1320                 return 0;
1321
1322         if (i915_terminally_wedged(i915))
1323                 return 0;
1324
1325         return i915_subtests(tests, i915);
1326 }