1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2018 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "../i915_reset.h"
10
11 #include "../i915_selftest.h"
12 #include "igt_flush_test.h"
13 #include "igt_live_test.h"
14 #include "igt_spinner.h"
15 #include "i915_random.h"
16
17 #include "mock_context.h"
18
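/*
 * live_sanitycheck: a basic smoke test of the execlists submission path.
 * For each engine we submit a single spinning batch from a kernel context,
 * check that it actually starts executing, then terminate it and flush.
 * No preemption is involved; this only confirms that requests flow through
 * logical ring contexts at all.
 */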
19 static int live_sanitycheck(void *arg)
20 {
21         struct drm_i915_private *i915 = arg;
22         struct intel_engine_cs *engine;
23         struct i915_gem_context *ctx;
24         enum intel_engine_id id;
25         struct igt_spinner spin;
26         intel_wakeref_t wakeref;
27         int err = -ENOMEM;
28
29         if (!HAS_LOGICAL_RING_CONTEXTS(i915))
30                 return 0;
31
32         mutex_lock(&i915->drm.struct_mutex);
33         wakeref = intel_runtime_pm_get(i915);
34
35         if (igt_spinner_init(&spin, i915))
36                 goto err_unlock;
37
38         ctx = kernel_context(i915);
39         if (!ctx)
40                 goto err_spin;
41
42         for_each_engine(engine, i915, id) {
43                 struct i915_request *rq;
44
45                 rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
46                 if (IS_ERR(rq)) {
47                         err = PTR_ERR(rq);
48                         goto err_ctx;
49                 }
50
51                 i915_request_add(rq);
52                 if (!igt_wait_for_spinner(&spin, rq)) {
53                         GEM_TRACE("spinner failed to start\n");
54                         GEM_TRACE_DUMP();
55                         i915_gem_set_wedged(i915);
56                         err = -EIO;
57                         goto err_ctx;
58                 }
59
60                 igt_spinner_end(&spin);
61                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
62                         err = -EIO;
63                         goto err_ctx;
64                 }
65         }
66
67         err = 0;
68 err_ctx:
69         kernel_context_close(ctx);
70 err_spin:
71         igt_spinner_fini(&spin);
72 err_unlock:
73         igt_flush_test(i915, I915_WAIT_LOCKED);
74         intel_runtime_pm_put(i915, wakeref);
75         mutex_unlock(&i915->drm.struct_mutex);
76         return err;
77 }
78
79 static int live_busywait_preempt(void *arg)
80 {
81         struct drm_i915_private *i915 = arg;
82         struct i915_gem_context *ctx_hi, *ctx_lo;
83         struct intel_engine_cs *engine;
84         struct drm_i915_gem_object *obj;
85         struct i915_vma *vma;
86         enum intel_engine_id id;
87         intel_wakeref_t wakeref;
88         int err = -ENOMEM;
89         u32 *map;
90
91         /*
92          * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
93          * preempt the busywaits used to synchronise between rings.
94          */
95
96         mutex_lock(&i915->drm.struct_mutex);
97         wakeref = intel_runtime_pm_get(i915);
98
99         ctx_hi = kernel_context(i915);
100         if (!ctx_hi)
101                 goto err_unlock;
102         ctx_hi->sched.priority =
103                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
104
105         ctx_lo = kernel_context(i915);
106         if (!ctx_lo)
107                 goto err_ctx_hi;
108         ctx_lo->sched.priority =
109                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
110
111         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
112         if (IS_ERR(obj)) {
113                 err = PTR_ERR(obj);
114                 goto err_ctx_lo;
115         }
116
117         map = i915_gem_object_pin_map(obj, I915_MAP_WC);
118         if (IS_ERR(map)) {
119                 err = PTR_ERR(map);
120                 goto err_obj;
121         }
122
123         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
124         if (IS_ERR(vma)) {
125                 err = PTR_ERR(vma);
126                 goto err_map;
127         }
128
129         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
130         if (err)
131                 goto err_map;
132
133         for_each_engine(engine, i915, id) {
134                 struct i915_request *lo, *hi;
135                 struct igt_live_test t;
136                 u32 *cs;
137
138                 if (!intel_engine_can_store_dword(engine))
139                         continue;
140
141                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
142                         err = -EIO;
143                         goto err_vma;
144                 }
145
146                 /*
147                  * We create two requests. The low priority request
148                  * busywaits on a semaphore (inside the ringbuffer where
149                  * it should be preemptible) and the high priority request
150                  * uses a MI_STORE_DWORD_IMM to update the semaphore value
151                  * allowing the first request to complete. If preemption
152                  * fails, we hang instead.
153                  */
154
155                 lo = i915_request_alloc(engine, ctx_lo);
156                 if (IS_ERR(lo)) {
157                         err = PTR_ERR(lo);
158                         goto err_vma;
159                 }
160
161                 cs = intel_ring_begin(lo, 8);
162                 if (IS_ERR(cs)) {
163                         err = PTR_ERR(cs);
164                         i915_request_add(lo);
165                         goto err_vma;
166                 }
167
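                /*
                 * The 8 dwords reserved above are, roughly:
                 *   [0..3] MI_STORE_DWORD_IMM: write 1 to the semaphore
                 *          dword so we can tell the request has started, and
                 *   [4..7] MI_SEMAPHORE_WAIT (polling, global GTT,
                 *          SAD_EQ_SDD): busywait until that same dword reads
                 *          back as 0 again.
                 */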
168                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
169                 *cs++ = i915_ggtt_offset(vma);
170                 *cs++ = 0;
171                 *cs++ = 1;
172
173                 /* XXX Do we need a flush + invalidate here? */
174
175                 *cs++ = MI_SEMAPHORE_WAIT |
176                         MI_SEMAPHORE_GLOBAL_GTT |
177                         MI_SEMAPHORE_POLL |
178                         MI_SEMAPHORE_SAD_EQ_SDD;
179                 *cs++ = 0;
180                 *cs++ = i915_ggtt_offset(vma);
181                 *cs++ = 0;
182
183                 intel_ring_advance(lo, cs);
184                 i915_request_add(lo);
185
186                 if (wait_for(READ_ONCE(*map), 10)) {
187                         err = -ETIMEDOUT;
188                         goto err_vma;
189                 }
190
191                 /* Low priority request should be busywaiting now */
192                 if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
193                         pr_err("%s: Busywaiting request did not busywait!\n",
194                                engine->name);
195                         err = -EIO;
196                         goto err_vma;
197                 }
198
199                 hi = i915_request_alloc(engine, ctx_hi);
200                 if (IS_ERR(hi)) {
201                         err = PTR_ERR(hi);
202                         goto err_vma;
203                 }
204
205                 cs = intel_ring_begin(hi, 4);
206                 if (IS_ERR(cs)) {
207                         err = PTR_ERR(cs);
208                         i915_request_add(hi);
209                         goto err_vma;
210                 }
211
212                 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
213                 *cs++ = i915_ggtt_offset(vma);
214                 *cs++ = 0;
215                 *cs++ = 0;
216
217                 intel_ring_advance(hi, cs);
218                 i915_request_add(hi);
219
220                 if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
221                         struct drm_printer p = drm_info_printer(i915->drm.dev);
222
223                         pr_err("%s: Failed to preempt semaphore busywait!\n",
224                                engine->name);
225
226                         intel_engine_dump(engine, &p, "%s\n", engine->name);
227                         GEM_TRACE_DUMP();
228
229                         i915_gem_set_wedged(i915);
230                         err = -EIO;
231                         goto err_vma;
232                 }
233                 GEM_BUG_ON(READ_ONCE(*map));
234
235                 if (igt_live_test_end(&t)) {
236                         err = -EIO;
237                         goto err_vma;
238                 }
239         }
240
241         err = 0;
242 err_vma:
243         i915_vma_unpin(vma);
244 err_map:
245         i915_gem_object_unpin_map(obj);
246 err_obj:
247         i915_gem_object_put(obj);
248 err_ctx_lo:
249         kernel_context_close(ctx_lo);
250 err_ctx_hi:
251         kernel_context_close(ctx_hi);
252 err_unlock:
253         if (igt_flush_test(i915, I915_WAIT_LOCKED))
254                 err = -EIO;
255         intel_runtime_pm_put(i915, wakeref);
256         mutex_unlock(&i915->drm.struct_mutex);
257         return err;
258 }
259
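/*
 * live_preempt: on each engine that supports preemption, start a spinner
 * from a minimum-priority context, then submit a second spinner from a
 * maximum-priority context and check that the high priority spinner gets
 * to run while the low priority one is still spinning.
 */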
260 static int live_preempt(void *arg)
261 {
262         struct drm_i915_private *i915 = arg;
263         struct i915_gem_context *ctx_hi, *ctx_lo;
264         struct igt_spinner spin_hi, spin_lo;
265         struct intel_engine_cs *engine;
266         enum intel_engine_id id;
267         intel_wakeref_t wakeref;
268         int err = -ENOMEM;
269
270         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
271                 return 0;
272
273         if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
274                 pr_err("Logical preemption supported, but not exposed\n");
275
276         mutex_lock(&i915->drm.struct_mutex);
277         wakeref = intel_runtime_pm_get(i915);
278
279         if (igt_spinner_init(&spin_hi, i915))
280                 goto err_unlock;
281
282         if (igt_spinner_init(&spin_lo, i915))
283                 goto err_spin_hi;
284
285         ctx_hi = kernel_context(i915);
286         if (!ctx_hi)
287                 goto err_spin_lo;
288         ctx_hi->sched.priority =
289                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
290
291         ctx_lo = kernel_context(i915);
292         if (!ctx_lo)
293                 goto err_ctx_hi;
294         ctx_lo->sched.priority =
295                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
296
297         for_each_engine(engine, i915, id) {
298                 struct igt_live_test t;
299                 struct i915_request *rq;
300
301                 if (!intel_engine_has_preemption(engine))
302                         continue;
303
304                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
305                         err = -EIO;
306                         goto err_ctx_lo;
307                 }
308
309                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
310                                                 MI_ARB_CHECK);
311                 if (IS_ERR(rq)) {
312                         err = PTR_ERR(rq);
313                         goto err_ctx_lo;
314                 }
315
316                 i915_request_add(rq);
317                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
318                         GEM_TRACE("lo spinner failed to start\n");
319                         GEM_TRACE_DUMP();
320                         i915_gem_set_wedged(i915);
321                         err = -EIO;
322                         goto err_ctx_lo;
323                 }
324
325                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
326                                                 MI_ARB_CHECK);
327                 if (IS_ERR(rq)) {
328                         igt_spinner_end(&spin_lo);
329                         err = PTR_ERR(rq);
330                         goto err_ctx_lo;
331                 }
332
333                 i915_request_add(rq);
334                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
335                         GEM_TRACE("hi spinner failed to start\n");
336                         GEM_TRACE_DUMP();
337                         i915_gem_set_wedged(i915);
338                         err = -EIO;
339                         goto err_ctx_lo;
340                 }
341
342                 igt_spinner_end(&spin_hi);
343                 igt_spinner_end(&spin_lo);
344
345                 if (igt_live_test_end(&t)) {
346                         err = -EIO;
347                         goto err_ctx_lo;
348                 }
349         }
350
351         err = 0;
352 err_ctx_lo:
353         kernel_context_close(ctx_lo);
354 err_ctx_hi:
355         kernel_context_close(ctx_hi);
356 err_spin_lo:
357         igt_spinner_fini(&spin_lo);
358 err_spin_hi:
359         igt_spinner_fini(&spin_hi);
360 err_unlock:
361         igt_flush_test(i915, I915_WAIT_LOCKED);
362         intel_runtime_pm_put(i915, wakeref);
363         mutex_unlock(&i915->drm.struct_mutex);
364         return err;
365 }
366
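/*
 * live_late_preempt: both contexts start at default priority, so the second
 * spinner initially queues up behind the first. We then raise the second
 * request's priority via engine->schedule() after submission and expect
 * that late bump to preempt the first spinner.
 */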
367 static int live_late_preempt(void *arg)
368 {
369         struct drm_i915_private *i915 = arg;
370         struct i915_gem_context *ctx_hi, *ctx_lo;
371         struct igt_spinner spin_hi, spin_lo;
372         struct intel_engine_cs *engine;
373         struct i915_sched_attr attr = {};
374         enum intel_engine_id id;
375         intel_wakeref_t wakeref;
376         int err = -ENOMEM;
377
378         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
379                 return 0;
380
381         mutex_lock(&i915->drm.struct_mutex);
382         wakeref = intel_runtime_pm_get(i915);
383
384         if (igt_spinner_init(&spin_hi, i915))
385                 goto err_unlock;
386
387         if (igt_spinner_init(&spin_lo, i915))
388                 goto err_spin_hi;
389
390         ctx_hi = kernel_context(i915);
391         if (!ctx_hi)
392                 goto err_spin_lo;
393
394         ctx_lo = kernel_context(i915);
395         if (!ctx_lo)
396                 goto err_ctx_hi;
397
398         for_each_engine(engine, i915, id) {
399                 struct igt_live_test t;
400                 struct i915_request *rq;
401
402                 if (!intel_engine_has_preemption(engine))
403                         continue;
404
405                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
406                         err = -EIO;
407                         goto err_ctx_lo;
408                 }
409
410                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
411                                                 MI_ARB_CHECK);
412                 if (IS_ERR(rq)) {
413                         err = PTR_ERR(rq);
414                         goto err_ctx_lo;
415                 }
416
417                 i915_request_add(rq);
418                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
419                         pr_err("First context failed to start\n");
420                         goto err_wedged;
421                 }
422
423                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
424                                                 MI_NOOP);
425                 if (IS_ERR(rq)) {
426                         igt_spinner_end(&spin_lo);
427                         err = PTR_ERR(rq);
428                         goto err_ctx_lo;
429                 }
430
431                 i915_request_add(rq);
432                 if (igt_wait_for_spinner(&spin_hi, rq)) {
433                         pr_err("Second context overtook first?\n");
434                         goto err_wedged;
435                 }
436
437                 attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
438                 engine->schedule(rq, &attr);
439
440                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
441                         pr_err("High priority context failed to preempt the low priority context\n");
442                         GEM_TRACE_DUMP();
443                         goto err_wedged;
444                 }
445
446                 igt_spinner_end(&spin_hi);
447                 igt_spinner_end(&spin_lo);
448
449                 if (igt_live_test_end(&t)) {
450                         err = -EIO;
451                         goto err_ctx_lo;
452                 }
453         }
454
455         err = 0;
456 err_ctx_lo:
457         kernel_context_close(ctx_lo);
458 err_ctx_hi:
459         kernel_context_close(ctx_hi);
460 err_spin_lo:
461         igt_spinner_fini(&spin_lo);
462 err_spin_hi:
463         igt_spinner_fini(&spin_hi);
464 err_unlock:
465         igt_flush_test(i915, I915_WAIT_LOCKED);
466         intel_runtime_pm_put(i915, wakeref);
467         mutex_unlock(&i915->drm.struct_mutex);
468         return err;
469
470 err_wedged:
471         igt_spinner_end(&spin_hi);
472         igt_spinner_end(&spin_lo);
473         i915_gem_set_wedged(i915);
474         err = -EIO;
475         goto err_ctx_lo;
476 }
477
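/*
 * A preempt_client bundles a kernel context with its own spinner, so the
 * tests below can juggle several independent sources of spinning requests.
 */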
478 struct preempt_client {
479         struct igt_spinner spin;
480         struct i915_gem_context *ctx;
481 };
482
483 static int preempt_client_init(struct drm_i915_private *i915,
484                                struct preempt_client *c)
485 {
486         c->ctx = kernel_context(i915);
487         if (!c->ctx)
488                 return -ENOMEM;
489
490         if (igt_spinner_init(&c->spin, i915))
491                 goto err_ctx;
492
493         return 0;
494
495 err_ctx:
496         kernel_context_close(c->ctx);
497         return -ENOMEM;
498 }
499
500 static void preempt_client_fini(struct preempt_client *c)
501 {
502         igt_spinner_fini(&c->spin);
503         kernel_context_close(c->ctx);
504 }
505
506 static int live_suppress_self_preempt(void *arg)
507 {
508         struct drm_i915_private *i915 = arg;
509         struct intel_engine_cs *engine;
510         struct i915_sched_attr attr = {
511                 .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
512         };
513         struct preempt_client a, b;
514         enum intel_engine_id id;
515         intel_wakeref_t wakeref;
516         int err = -ENOMEM;
517
518         /*
519          * Verify that if a preemption request does not cause a change in
520          * the current execution order, the preempt-to-idle injection is
521          * skipped and that we do not accidentally apply it after the CS
522          * completion event.
523          */
524
525         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
526                 return 0;
527
528         if (USES_GUC_SUBMISSION(i915))
529                 return 0; /* presume black box */
530
531         mutex_lock(&i915->drm.struct_mutex);
532         wakeref = intel_runtime_pm_get(i915);
533
534         if (preempt_client_init(i915, &a))
535                 goto err_unlock;
536         if (preempt_client_init(i915, &b))
537                 goto err_client_a;
538
539         for_each_engine(engine, i915, id) {
540                 struct i915_request *rq_a, *rq_b;
541                 int depth;
542
543                 if (!intel_engine_has_preemption(engine))
544                         continue;
545
546                 engine->execlists.preempt_hang.count = 0;
547
548                 rq_a = igt_spinner_create_request(&a.spin,
549                                                   a.ctx, engine,
550                                                   MI_NOOP);
551                 if (IS_ERR(rq_a)) {
552                         err = PTR_ERR(rq_a);
553                         goto err_client_b;
554                 }
555
556                 i915_request_add(rq_a);
557                 if (!igt_wait_for_spinner(&a.spin, rq_a)) {
558                         pr_err("First client failed to start\n");
559                         goto err_wedged;
560                 }
561
562                 for (depth = 0; depth < 8; depth++) {
563                         rq_b = igt_spinner_create_request(&b.spin,
564                                                           b.ctx, engine,
565                                                           MI_NOOP);
566                         if (IS_ERR(rq_b)) {
567                                 err = PTR_ERR(rq_b);
568                                 goto err_client_b;
569                         }
570                         i915_request_add(rq_b);
571
572                         GEM_BUG_ON(i915_request_completed(rq_a));
573                         engine->schedule(rq_a, &attr);
574                         igt_spinner_end(&a.spin);
575
576                         if (!igt_wait_for_spinner(&b.spin, rq_b)) {
577                                 pr_err("Second client failed to start\n");
578                                 goto err_wedged;
579                         }
580
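                        /*
                         * Ping-pong the two clients: the spinner we just
                         * started becomes the running one for the next
                         * iteration, so at every step the priority bump is
                         * applied to a request that is already executing and
                         * should not generate a preempt-to-idle cycle.
                         */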
581                         swap(a, b);
582                         rq_a = rq_b;
583                 }
584                 igt_spinner_end(&a.spin);
585
586                 if (engine->execlists.preempt_hang.count) {
587                         pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
588                                engine->execlists.preempt_hang.count,
589                                depth);
590                         err = -EINVAL;
591                         goto err_client_b;
592                 }
593
594                 if (igt_flush_test(i915, I915_WAIT_LOCKED))
595                         goto err_wedged;
596         }
597
598         err = 0;
599 err_client_b:
600         preempt_client_fini(&b);
601 err_client_a:
602         preempt_client_fini(&a);
603 err_unlock:
604         if (igt_flush_test(i915, I915_WAIT_LOCKED))
605                 err = -EIO;
606         intel_runtime_pm_put(i915, wakeref);
607         mutex_unlock(&i915->drm.struct_mutex);
608         return err;
609
610 err_wedged:
611         igt_spinner_end(&b.spin);
612         igt_spinner_end(&a.spin);
613         i915_gem_set_wedged(i915);
614         err = -EIO;
615         goto err_client_b;
616 }
617
618 static int __i915_sw_fence_call
619 dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
620 {
621         return NOTIFY_DONE;
622 }
623
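/*
 * dummy_request() fabricates a bare i915_request that is never submitted to
 * the hardware: it exists only as a scheduling node that appears permanently
 * incomplete, which live_suppress_wait_preempt uses below to defeat the
 * NEWCLIENT priority boost. It must be released with dummy_request_free().
 */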
624 static struct i915_request *dummy_request(struct intel_engine_cs *engine)
625 {
626         struct i915_request *rq;
627
628         rq = kzalloc(sizeof(*rq), GFP_KERNEL);
629         if (!rq)
630                 return NULL;
631
632         INIT_LIST_HEAD(&rq->active_list);
633         rq->engine = engine;
634
635         i915_sched_node_init(&rq->sched);
636
637         /* mark this request as permanently incomplete */
638         rq->fence.seqno = 1;
639         BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
640         rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
641         GEM_BUG_ON(i915_request_completed(rq));
642
643         i915_sw_fence_init(&rq->submit, dummy_notify);
644         i915_sw_fence_commit(&rq->submit);
645
646         return rq;
647 }
648
649 static void dummy_request_free(struct i915_request *dummy)
650 {
651         i915_request_mark_complete(dummy);
652         i915_sched_node_fini(&dummy->sched);
653         i915_sw_fence_fini(&dummy->submit);
654
655         dma_fence_free(&dummy->fence);
656 }
657
658 static int live_suppress_wait_preempt(void *arg)
659 {
660         struct drm_i915_private *i915 = arg;
661         struct preempt_client client[4];
662         struct intel_engine_cs *engine;
663         enum intel_engine_id id;
664         intel_wakeref_t wakeref;
665         int err = -ENOMEM;
666         int i;
667
668         /*
669          * Waiters are given a little priority nudge, but not enough
670          * to actually cause any preemption. Double check that we do
671          * not needlessly generate preempt-to-idle cycles.
672          */
673
674         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
675                 return 0;
676
677         mutex_lock(&i915->drm.struct_mutex);
678         wakeref = intel_runtime_pm_get(i915);
679
680         if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
681                 goto err_unlock;
682         if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
683                 goto err_client_0;
684         if (preempt_client_init(i915, &client[2])) /* head of queue */
685                 goto err_client_1;
686         if (preempt_client_init(i915, &client[3])) /* bystander */
687                 goto err_client_2;
688
689         for_each_engine(engine, i915, id) {
690                 int depth;
691
692                 if (!intel_engine_has_preemption(engine))
693                         continue;
694
695                 if (!engine->emit_init_breadcrumb)
696                         continue;
697
698                 for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
699                         struct i915_request *rq[ARRAY_SIZE(client)];
700                         struct i915_request *dummy;
701
702                         engine->execlists.preempt_hang.count = 0;
703
704                         dummy = dummy_request(engine);
705                         if (!dummy)
706                                 goto err_client_3;
707
708                         for (i = 0; i < ARRAY_SIZE(client); i++) {
709                                 rq[i] = igt_spinner_create_request(&client[i].spin,
710                                                                    client[i].ctx, engine,
711                                                                    MI_NOOP);
712                                 if (IS_ERR(rq[i])) {
713                                         err = PTR_ERR(rq[i]);
714                                         goto err_wedged;
715                                 }
716
717                                 /* Disable NEWCLIENT promotion */
718                                 __i915_active_request_set(&rq[i]->timeline->last_request,
719                                                           dummy);
720                                 i915_request_add(rq[i]);
721                         }
722
723                         dummy_request_free(dummy);
724
725                         GEM_BUG_ON(i915_request_completed(rq[0]));
726                         if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
727                                 pr_err("%s: First client failed to start\n",
728                                        engine->name);
729                                 goto err_wedged;
730                         }
731                         GEM_BUG_ON(!i915_request_started(rq[0]));
732
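                        /*
                         * Wait briefly (with I915_WAIT_PRIORITY, so the
                         * waiter nudge is applied) on the depth'th request,
                         * which should still be busy: either spinning itself
                         * or queued behind the spinner. The wait must time
                         * out; the small waiter boost should not be enough
                         * to preempt the running spinner.
                         */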
733                         if (i915_request_wait(rq[depth],
734                                               I915_WAIT_LOCKED |
735                                               I915_WAIT_PRIORITY,
736                                               1) != -ETIME) {
737                                 pr_err("%s: Waiter depth:%d completed!\n",
738                                        engine->name, depth);
739                                 goto err_wedged;
740                         }
741
742                         for (i = 0; i < ARRAY_SIZE(client); i++)
743                                 igt_spinner_end(&client[i].spin);
744
745                         if (igt_flush_test(i915, I915_WAIT_LOCKED))
746                                 goto err_wedged;
747
748                         if (engine->execlists.preempt_hang.count) {
749                                 pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
750                                        engine->name,
751                                        engine->execlists.preempt_hang.count,
752                                        depth);
753                                 err = -EINVAL;
754                                 goto err_client_3;
755                         }
756                 }
757         }
758
759         err = 0;
760 err_client_3:
761         preempt_client_fini(&client[3]);
762 err_client_2:
763         preempt_client_fini(&client[2]);
764 err_client_1:
765         preempt_client_fini(&client[1]);
766 err_client_0:
767         preempt_client_fini(&client[0]);
768 err_unlock:
769         if (igt_flush_test(i915, I915_WAIT_LOCKED))
770                 err = -EIO;
771         intel_runtime_pm_put(i915, wakeref);
772         mutex_unlock(&i915->drm.struct_mutex);
773         return err;
774
775 err_wedged:
776         for (i = 0; i < ARRAY_SIZE(client); i++)
777                 igt_spinner_end(&client[i].spin);
778         i915_gem_set_wedged(i915);
779         err = -EIO;
780         goto err_client_3;
781 }
782
783 static int live_chain_preempt(void *arg)
784 {
785         struct drm_i915_private *i915 = arg;
786         struct intel_engine_cs *engine;
787         struct preempt_client hi, lo;
788         enum intel_engine_id id;
789         intel_wakeref_t wakeref;
790         int err = -ENOMEM;
791
792         /*
793          * Build a chain AB...BA between two contexts (A, B) and request
794          * preemption of the last request. It should then complete before
795          * the previously submitted spinner in B.
796          */
797
798         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
799                 return 0;
800
801         mutex_lock(&i915->drm.struct_mutex);
802         wakeref = intel_runtime_pm_get(i915);
803
804         if (preempt_client_init(i915, &hi))
805                 goto err_unlock;
806
807         if (preempt_client_init(i915, &lo))
808                 goto err_client_hi;
809
810         for_each_engine(engine, i915, id) {
811                 struct i915_sched_attr attr = {
812                         .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
813                 };
814                 struct igt_live_test t;
815                 struct i915_request *rq;
816                 int ring_size, count, i;
817
818                 if (!intel_engine_has_preemption(engine))
819                         continue;
820
821                 rq = igt_spinner_create_request(&lo.spin,
822                                                 lo.ctx, engine,
823                                                 MI_ARB_CHECK);
824                 if (IS_ERR(rq))
825                         goto err_wedged;
826                 i915_request_add(rq);
827
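                /*
                 * Estimate how many requests fit in the ring: wa_tail - head
                 * is roughly the space one spinner request consumed, so
                 * dividing the ring size by that gives an upper bound on the
                 * chain length we can queue without wrapping.
                 */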
828                 ring_size = rq->wa_tail - rq->head;
829                 if (ring_size < 0)
830                         ring_size += rq->ring->size;
831                 ring_size = rq->ring->size / ring_size;
832                 pr_debug("%s(%s): Using maximum of %d requests\n",
833                          __func__, engine->name, ring_size);
834
835                 igt_spinner_end(&lo.spin);
836                 if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
837                         pr_err("Timed out waiting to flush %s\n", engine->name);
838                         goto err_wedged;
839                 }
840
841                 if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
842                         err = -EIO;
843                         goto err_wedged;
844                 }
845
846                 for_each_prime_number_from(count, 1, ring_size) {
847                         rq = igt_spinner_create_request(&hi.spin,
848                                                         hi.ctx, engine,
849                                                         MI_ARB_CHECK);
850                         if (IS_ERR(rq))
851                                 goto err_wedged;
852                         i915_request_add(rq);
853                         if (!igt_wait_for_spinner(&hi.spin, rq))
854                                 goto err_wedged;
855
856                         rq = igt_spinner_create_request(&lo.spin,
857                                                         lo.ctx, engine,
858                                                         MI_ARB_CHECK);
859                         if (IS_ERR(rq))
860                                 goto err_wedged;
861                         i915_request_add(rq);
862
863                         for (i = 0; i < count; i++) {
864                                 rq = i915_request_alloc(engine, lo.ctx);
865                                 if (IS_ERR(rq))
866                                         goto err_wedged;
867                                 i915_request_add(rq);
868                         }
869
870                         rq = i915_request_alloc(engine, hi.ctx);
871                         if (IS_ERR(rq))
872                                 goto err_wedged;
873                         i915_request_add(rq);
874                         engine->schedule(rq, &attr);
875
876                         igt_spinner_end(&hi.spin);
877                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
878                                 struct drm_printer p =
879                                         drm_info_printer(i915->drm.dev);
880
881                                 pr_err("Failed to preempt over chain of %d\n",
882                                        count);
883                                 intel_engine_dump(engine, &p,
884                                                   "%s\n", engine->name);
885                                 goto err_wedged;
886                         }
887                         igt_spinner_end(&lo.spin);
888
889                         rq = i915_request_alloc(engine, lo.ctx);
890                         if (IS_ERR(rq))
891                                 goto err_wedged;
892                         i915_request_add(rq);
893                         if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
894                                 struct drm_printer p =
895                                         drm_info_printer(i915->drm.dev);
896
897                                 pr_err("Failed to flush low priority chain of %d requests\n",
898                                        count);
899                                 intel_engine_dump(engine, &p,
900                                                   "%s\n", engine->name);
901                                 goto err_wedged;
902                         }
903                 }
904
905                 if (igt_live_test_end(&t)) {
906                         err = -EIO;
907                         goto err_wedged;
908                 }
909         }
910
911         err = 0;
912 err_client_lo:
913         preempt_client_fini(&lo);
914 err_client_hi:
915         preempt_client_fini(&hi);
916 err_unlock:
917         if (igt_flush_test(i915, I915_WAIT_LOCKED))
918                 err = -EIO;
919         intel_runtime_pm_put(i915, wakeref);
920         mutex_unlock(&i915->drm.struct_mutex);
921         return err;
922
923 err_wedged:
924         igt_spinner_end(&hi.spin);
925         igt_spinner_end(&lo.spin);
926         i915_gem_set_wedged(i915);
927         err = -EIO;
928         goto err_client_lo;
929 }
930
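/*
 * live_preempt_hang: force the preempt-to-idle context switch itself to hang
 * (via execlists.preempt_hang.inject_hang), then check that an engine reset
 * recovers the situation and the high priority spinner still gets to run
 * afterwards.
 */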
931 static int live_preempt_hang(void *arg)
932 {
933         struct drm_i915_private *i915 = arg;
934         struct i915_gem_context *ctx_hi, *ctx_lo;
935         struct igt_spinner spin_hi, spin_lo;
936         struct intel_engine_cs *engine;
937         enum intel_engine_id id;
938         intel_wakeref_t wakeref;
939         int err = -ENOMEM;
940
941         if (!HAS_LOGICAL_RING_PREEMPTION(i915))
942                 return 0;
943
944         if (!intel_has_reset_engine(i915))
945                 return 0;
946
947         mutex_lock(&i915->drm.struct_mutex);
948         wakeref = intel_runtime_pm_get(i915);
949
950         if (igt_spinner_init(&spin_hi, i915))
951                 goto err_unlock;
952
953         if (igt_spinner_init(&spin_lo, i915))
954                 goto err_spin_hi;
955
956         ctx_hi = kernel_context(i915);
957         if (!ctx_hi)
958                 goto err_spin_lo;
959         ctx_hi->sched.priority =
960                 I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
961
962         ctx_lo = kernel_context(i915);
963         if (!ctx_lo)
964                 goto err_ctx_hi;
965         ctx_lo->sched.priority =
966                 I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
967
968         for_each_engine(engine, i915, id) {
969                 struct i915_request *rq;
970
971                 if (!intel_engine_has_preemption(engine))
972                         continue;
973
974                 rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
975                                                 MI_ARB_CHECK);
976                 if (IS_ERR(rq)) {
977                         err = PTR_ERR(rq);
978                         goto err_ctx_lo;
979                 }
980
981                 i915_request_add(rq);
982                 if (!igt_wait_for_spinner(&spin_lo, rq)) {
983                         GEM_TRACE("lo spinner failed to start\n");
984                         GEM_TRACE_DUMP();
985                         i915_gem_set_wedged(i915);
986                         err = -EIO;
987                         goto err_ctx_lo;
988                 }
989
990                 rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
991                                                 MI_ARB_CHECK);
992                 if (IS_ERR(rq)) {
993                         igt_spinner_end(&spin_lo);
994                         err = PTR_ERR(rq);
995                         goto err_ctx_lo;
996                 }
997
998                 init_completion(&engine->execlists.preempt_hang.completion);
999                 engine->execlists.preempt_hang.inject_hang = true;
1000
1001                 i915_request_add(rq);
1002
1003                 if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
1004                                                  HZ / 10)) {
1005                         pr_err("Preemption did not occur within timeout!\n");
1006                         GEM_TRACE_DUMP();
1007                         i915_gem_set_wedged(i915);
1008                         err = -EIO;
1009                         goto err_ctx_lo;
1010                 }
1011
1012                 set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1013                 i915_reset_engine(engine, NULL);
1014                 clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
1015
1016                 engine->execlists.preempt_hang.inject_hang = false;
1017
1018                 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1019                         GEM_TRACE("hi spinner failed to start\n");
1020                         GEM_TRACE_DUMP();
1021                         i915_gem_set_wedged(i915);
1022                         err = -EIO;
1023                         goto err_ctx_lo;
1024                 }
1025
1026                 igt_spinner_end(&spin_hi);
1027                 igt_spinner_end(&spin_lo);
1028                 if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
1029                         err = -EIO;
1030                         goto err_ctx_lo;
1031                 }
1032         }
1033
1034         err = 0;
1035 err_ctx_lo:
1036         kernel_context_close(ctx_lo);
1037 err_ctx_hi:
1038         kernel_context_close(ctx_hi);
1039 err_spin_lo:
1040         igt_spinner_fini(&spin_lo);
1041 err_spin_hi:
1042         igt_spinner_fini(&spin_hi);
1043 err_unlock:
1044         igt_flush_test(i915, I915_WAIT_LOCKED);
1045         intel_runtime_pm_put(i915, wakeref);
1046         mutex_unlock(&i915->drm.struct_mutex);
1047         return err;
1048 }
1049
1050 static int random_range(struct rnd_state *rnd, int min, int max)
1051 {
1052         return i915_prandom_u32_max_state(max - min, rnd) + min;
1053 }
1054
1055 static int random_priority(struct rnd_state *rnd)
1056 {
1057         return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
1058 }
1059
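/*
 * preempt_smoke carries the shared state for the preemption smoke tests:
 * a pool of contexts to pick from at random, an optional preemptible batch,
 * the engine a worker is currently targeting, and a running count of
 * submissions.
 */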
1060 struct preempt_smoke {
1061         struct drm_i915_private *i915;
1062         struct i915_gem_context **contexts;
1063         struct intel_engine_cs *engine;
1064         struct drm_i915_gem_object *batch;
1065         unsigned int ncontext;
1066         struct rnd_state prng;
1067         unsigned long count;
1068 };
1069
1070 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
1071 {
1072         return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
1073                                                           &smoke->prng)];
1074 }
1075
1076 static int smoke_submit(struct preempt_smoke *smoke,
1077                         struct i915_gem_context *ctx, int prio,
1078                         struct drm_i915_gem_object *batch)
1079 {
1080         struct i915_request *rq;
1081         struct i915_vma *vma = NULL;
1082         int err = 0;
1083
1084         if (batch) {
1085                 vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
1086                 if (IS_ERR(vma))
1087                         return PTR_ERR(vma);
1088
1089                 err = i915_vma_pin(vma, 0, 0, PIN_USER);
1090                 if (err)
1091                         return err;
1092         }
1093
1094         ctx->sched.priority = prio;
1095
1096         rq = i915_request_alloc(smoke->engine, ctx);
1097         if (IS_ERR(rq)) {
1098                 err = PTR_ERR(rq);
1099                 goto unpin;
1100         }
1101
1102         if (vma) {
1103                 err = rq->engine->emit_bb_start(rq,
1104                                                 vma->node.start,
1105                                                 PAGE_SIZE, 0);
1106                 if (!err)
1107                         err = i915_vma_move_to_active(vma, rq, 0);
1108         }
1109
1110         i915_request_add(rq);
1111
1112 unpin:
1113         if (vma)
1114                 i915_vma_unpin(vma);
1115
1116         return err;
1117 }
1118
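/*
 * Each crescendo worker repeatedly picks a random context and resubmits to
 * its assigned engine, cycling the context priority from 0 up towards
 * I915_PRIORITY_MAX so the scheduler sees a steadily rising wave of
 * priorities to preempt with.
 */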
1119 static int smoke_crescendo_thread(void *arg)
1120 {
1121         struct preempt_smoke *smoke = arg;
1122         IGT_TIMEOUT(end_time);
1123         unsigned long count;
1124
1125         count = 0;
1126         do {
1127                 struct i915_gem_context *ctx = smoke_context(smoke);
1128                 int err;
1129
1130                 mutex_lock(&smoke->i915->drm.struct_mutex);
1131                 err = smoke_submit(smoke,
1132                                    ctx, count % I915_PRIORITY_MAX,
1133                                    smoke->batch);
1134                 mutex_unlock(&smoke->i915->drm.struct_mutex);
1135                 if (err)
1136                         return err;
1137
1138                 count++;
1139         } while (!__igt_timeout(end_time, NULL));
1140
1141         smoke->count = count;
1142         return 0;
1143 }
1144
1145 static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
1146 #define BATCH BIT(0)
1147 {
1148         struct task_struct *tsk[I915_NUM_ENGINES] = {};
1149         struct preempt_smoke arg[I915_NUM_ENGINES];
1150         struct intel_engine_cs *engine;
1151         enum intel_engine_id id;
1152         unsigned long count;
1153         int err = 0;
1154
1155         mutex_unlock(&smoke->i915->drm.struct_mutex);
1156
1157         for_each_engine(engine, smoke->i915, id) {
1158                 arg[id] = *smoke;
1159                 arg[id].engine = engine;
1160                 if (!(flags & BATCH))
1161                         arg[id].batch = NULL;
1162                 arg[id].count = 0;
1163
1164                 tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
1165                                       "igt/smoke:%d", id);
1166                 if (IS_ERR(tsk[id])) {
1167                         err = PTR_ERR(tsk[id]);
1168                         break;
1169                 }
1170                 get_task_struct(tsk[id]);
1171         }
1172
1173         count = 0;
1174         for_each_engine(engine, smoke->i915, id) {
1175                 int status;
1176
1177                 if (IS_ERR_OR_NULL(tsk[id]))
1178                         continue;
1179
1180                 status = kthread_stop(tsk[id]);
1181                 if (status && !err)
1182                         err = status;
1183
1184                 count += arg[id].count;
1185
1186                 put_task_struct(tsk[id]);
1187         }
1188
1189         mutex_lock(&smoke->i915->drm.struct_mutex);
1190
1191         pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
1192                 count, flags,
1193                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1194         return err;
1195 }
1196
1197 static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
1198 {
1199         enum intel_engine_id id;
1200         IGT_TIMEOUT(end_time);
1201         unsigned long count;
1202
1203         count = 0;
1204         do {
1205                 for_each_engine(smoke->engine, smoke->i915, id) {
1206                         struct i915_gem_context *ctx = smoke_context(smoke);
1207                         int err;
1208
1209                         err = smoke_submit(smoke,
1210                                            ctx, random_priority(&smoke->prng),
1211                                            flags & BATCH ? smoke->batch : NULL);
1212                         if (err)
1213                                 return err;
1214
1215                         count++;
1216                 }
1217         } while (!__igt_timeout(end_time, NULL));
1218
1219         pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
1220                 count, flags,
1221                 RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
1222         return 0;
1223 }
1224
1225 static int live_preempt_smoke(void *arg)
1226 {
1227         struct preempt_smoke smoke = {
1228                 .i915 = arg,
1229                 .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
1230                 .ncontext = 1024,
1231         };
1232         const unsigned int phase[] = { 0, BATCH };
1233         intel_wakeref_t wakeref;
1234         struct igt_live_test t;
1235         int err = -ENOMEM;
1236         u32 *cs;
1237         int n;
1238
1239         if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
1240                 return 0;
1241
1242         smoke.contexts = kmalloc_array(smoke.ncontext,
1243                                        sizeof(*smoke.contexts),
1244                                        GFP_KERNEL);
1245         if (!smoke.contexts)
1246                 return -ENOMEM;
1247
1248         mutex_lock(&smoke.i915->drm.struct_mutex);
1249         wakeref = intel_runtime_pm_get(smoke.i915);
1250
1251         smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
1252         if (IS_ERR(smoke.batch)) {
1253                 err = PTR_ERR(smoke.batch);
1254                 goto err_unlock;
1255         }
1256
1257         cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
1258         if (IS_ERR(cs)) {
1259                 err = PTR_ERR(cs);
1260                 goto err_batch;
1261         }
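        /*
         * Fill the batch with MI_ARB_CHECK so it can be preempted at every
         * arbitration point, and terminate it with MI_BATCH_BUFFER_END.
         */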
1262         for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
1263                 cs[n] = MI_ARB_CHECK;
1264         cs[n] = MI_BATCH_BUFFER_END;
1265         i915_gem_object_flush_map(smoke.batch);
1266         i915_gem_object_unpin_map(smoke.batch);
1267
1268         if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
1269                 err = -EIO;
1270                 goto err_batch;
1271         }
1272
1273         for (n = 0; n < smoke.ncontext; n++) {
1274                 smoke.contexts[n] = kernel_context(smoke.i915);
1275                 if (!smoke.contexts[n])
1276                         goto err_ctx;
1277         }
1278
1279         for (n = 0; n < ARRAY_SIZE(phase); n++) {
1280                 err = smoke_crescendo(&smoke, phase[n]);
1281                 if (err)
1282                         goto err_ctx;
1283
1284                 err = smoke_random(&smoke, phase[n]);
1285                 if (err)
1286                         goto err_ctx;
1287         }
1288
1289 err_ctx:
1290         if (igt_live_test_end(&t))
1291                 err = -EIO;
1292
1293         for (n = 0; n < smoke.ncontext; n++) {
1294                 if (!smoke.contexts[n])
1295                         break;
1296                 kernel_context_close(smoke.contexts[n]);
1297         }
1298
1299 err_batch:
1300         i915_gem_object_put(smoke.batch);
1301 err_unlock:
1302         intel_runtime_pm_put(smoke.i915, wakeref);
1303         mutex_unlock(&smoke.i915->drm.struct_mutex);
1304         kfree(smoke.contexts);
1305
1306         return err;
1307 }
1308
1309 int intel_execlists_live_selftests(struct drm_i915_private *i915)
1310 {
1311         static const struct i915_subtest tests[] = {
1312                 SUBTEST(live_sanitycheck),
1313                 SUBTEST(live_busywait_preempt),
1314                 SUBTEST(live_preempt),
1315                 SUBTEST(live_late_preempt),
1316                 SUBTEST(live_suppress_self_preempt),
1317                 SUBTEST(live_suppress_wait_preempt),
1318                 SUBTEST(live_chain_preempt),
1319                 SUBTEST(live_preempt_hang),
1320                 SUBTEST(live_preempt_smoke),
1321         };
1322
1323         if (!HAS_EXECLISTS(i915))
1324                 return 0;
1325
1326         if (i915_terminally_wedged(i915))
1327                 return 0;
1328
1329         return i915_subtests(tests, i915);
1330 }