/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_reset.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_gem_utils.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_context.h"

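/*
 * live_sanitycheck: on each engine, submit a single spinning request,
 * wait for it to start, then terminate it and flush. This only checks
 * that the logical ring context machinery is alive before the heavier
 * preemption tests below are attempted.
 */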
static int live_sanitycheck(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct igt_spinner spin;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin, i915))
                goto err_unlock;

        ctx = kernel_context(i915);
        if (!ctx)
                goto err_spin;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin, rq)) {
                        GEM_TRACE("spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx;
                }

                igt_spinner_end(&spin);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx;
                }
        }

        err = 0;
err_ctx:
        kernel_context_close(ctx);
err_spin:
        igt_spinner_fini(&spin);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int live_busywait_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;
        u32 *map;

        /*
         * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
         * preempt the busywaits used to synchronise between rings.
         */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_unlock;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto err_ctx_lo;
        }

        map = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(map)) {
                err = PTR_ERR(map);
                goto err_obj;
        }

        vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_map;
        }

        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
        if (err)
                goto err_map;

        for_each_engine(engine, i915, id) {
                struct i915_request *lo, *hi;
                struct igt_live_test t;
                u32 *cs;

                if (!intel_engine_can_store_dword(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_vma;
                }

                /*
                 * We create two requests. The low priority request
                 * busywaits on a semaphore (inside the ringbuffer where
                 * it should be preemptible) and the high priority request
                 * uses an MI_STORE_DWORD_IMM to update the semaphore value,
                 * allowing the first request to complete. If preemption
                 * fails, we hang instead.
                 */

                lo = igt_request_alloc(ctx_lo, engine);
                if (IS_ERR(lo)) {
                        err = PTR_ERR(lo);
                        goto err_vma;
                }

                cs = intel_ring_begin(lo, 8);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        i915_request_add(lo);
                        goto err_vma;
                }

                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;
                *cs++ = 1;

                /* XXX Do we need a flush + invalidate here? */

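                /*
                 * Poll the scratch dword until it reads back as zero
                 * (SAD_EQ_SDD); the high priority request below clears
                 * it with MI_STORE_DWORD_IMM to release this busywait.
                 */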
                *cs++ = MI_SEMAPHORE_WAIT |
                        MI_SEMAPHORE_GLOBAL_GTT |
                        MI_SEMAPHORE_POLL |
                        MI_SEMAPHORE_SAD_EQ_SDD;
                *cs++ = 0;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;

                intel_ring_advance(lo, cs);
                i915_request_add(lo);

                if (wait_for(READ_ONCE(*map), 10)) {
                        err = -ETIMEDOUT;
                        goto err_vma;
                }

                /* Low priority request should be busywaiting now */
                if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
                        pr_err("%s: Busywaiting request did not busywait!\n",
                               engine->name);
                        err = -EIO;
                        goto err_vma;
                }

                hi = igt_request_alloc(ctx_hi, engine);
                if (IS_ERR(hi)) {
                        err = PTR_ERR(hi);
                        goto err_vma;
                }

                cs = intel_ring_begin(hi, 4);
                if (IS_ERR(cs)) {
                        err = PTR_ERR(cs);
                        i915_request_add(hi);
                        goto err_vma;
                }

                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = i915_ggtt_offset(vma);
                *cs++ = 0;
                *cs++ = 0;

                intel_ring_advance(hi, cs);
                i915_request_add(hi);

                if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
                        struct drm_printer p = drm_info_printer(i915->drm.dev);

                        pr_err("%s: Failed to preempt semaphore busywait!\n",
                               engine->name);

                        intel_engine_dump(engine, &p, "%s\n", engine->name);
                        GEM_TRACE_DUMP();

                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_vma;
                }
                GEM_BUG_ON(READ_ONCE(*map));

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_vma;
                }
        }

        err = 0;
err_vma:
        i915_vma_unpin(vma);
err_map:
        i915_gem_object_unpin_map(obj);
err_obj:
        i915_gem_object_put(obj);
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

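/*
 * live_preempt: submit a spinning low priority request followed by a
 * spinning high priority request on the same engine, and check that the
 * high priority spinner starts executing (i.e. preempts) while the low
 * priority spinner is still running.
 */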
static int live_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
                pr_err("Logical preemption supported, but not exposed\n");

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        for_each_engine(engine, i915, id) {
                struct igt_live_test t;
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

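/*
 * live_late_preempt: start both contexts at default priority so that the
 * second spinner queues up behind the first, then bump its priority with
 * engine->schedule() and check that the priority change alone is enough
 * to preempt the already running spinner.
 */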
static int live_late_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;

        for_each_engine(engine, i915, id) {
                struct igt_live_test t;
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        pr_err("First context failed to start\n");
                        goto err_wedged;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_NOOP);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (igt_wait_for_spinner(&spin_hi, rq)) {
                        pr_err("Second context overtook first?\n");
                        goto err_wedged;
                }

                attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
                engine->schedule(rq, &attr);

                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
                        GEM_TRACE_DUMP();
                        goto err_wedged;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&spin_hi);
        igt_spinner_end(&spin_lo);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_ctx_lo;
}

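/*
 * A preempt_client bundles a context with its own spinner, so the tests
 * below can submit independent spinning requests from several "clients"
 * and reason about which one is allowed to run.
 */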
struct preempt_client {
        struct igt_spinner spin;
        struct i915_gem_context *ctx;
};

static int preempt_client_init(struct drm_i915_private *i915,
                               struct preempt_client *c)
{
        c->ctx = kernel_context(i915);
        if (!c->ctx)
                return -ENOMEM;

        if (igt_spinner_init(&c->spin, i915))
                goto err_ctx;

        return 0;

err_ctx:
        kernel_context_close(c->ctx);
        return -ENOMEM;
}

static void preempt_client_fini(struct preempt_client *c)
{
        igt_spinner_fini(&c->spin);
        kernel_context_close(c->ctx);
}

static int live_suppress_self_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {
                .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX)
        };
        struct preempt_client a, b;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        /*
         * Verify that if a preemption request does not cause a change in
         * the current execution order, the preempt-to-idle injection is
         * skipped and that we do not accidentally apply it after the CS
         * completion event.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (USES_GUC_SUBMISSION(i915))
                return 0; /* presume black box */

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &a))
                goto err_unlock;
        if (preempt_client_init(i915, &b))
                goto err_client_a;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq_a, *rq_b;
                int depth;

                if (!intel_engine_has_preemption(engine))
                        continue;

                engine->execlists.preempt_hang.count = 0;

                rq_a = igt_spinner_create_request(&a.spin,
                                                  a.ctx, engine,
                                                  MI_NOOP);
                if (IS_ERR(rq_a)) {
                        err = PTR_ERR(rq_a);
                        goto err_client_b;
                }

                i915_request_add(rq_a);
                if (!igt_wait_for_spinner(&a.spin, rq_a)) {
                        pr_err("First client failed to start\n");
                        goto err_wedged;
                }

                for (depth = 0; depth < 8; depth++) {
                        rq_b = igt_spinner_create_request(&b.spin,
                                                          b.ctx, engine,
                                                          MI_NOOP);
                        if (IS_ERR(rq_b)) {
                                err = PTR_ERR(rq_b);
                                goto err_client_b;
                        }
                        i915_request_add(rq_b);

                        GEM_BUG_ON(i915_request_completed(rq_a));
                        engine->schedule(rq_a, &attr);
                        igt_spinner_end(&a.spin);

                        if (!igt_wait_for_spinner(&b.spin, rq_b)) {
                                pr_err("Second client failed to start\n");
                                goto err_wedged;
                        }

                        swap(a, b);
                        rq_a = rq_b;
                }
                igt_spinner_end(&a.spin);

                if (engine->execlists.preempt_hang.count) {
                        pr_err("Preemption recorded x%d, depth %d; should have been suppressed!\n",
                               engine->execlists.preempt_hang.count,
                               depth);
                        err = -EINVAL;
                        goto err_client_b;
                }

                if (igt_flush_test(i915, I915_WAIT_LOCKED))
                        goto err_wedged;
        }

        err = 0;
err_client_b:
        preempt_client_fini(&b);
err_client_a:
        preempt_client_fini(&a);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&b.spin);
        igt_spinner_end(&a.spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_b;
}

static int __i915_sw_fence_call
dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
        return NOTIFY_DONE;
}

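/*
 * Build a bare, permanently incomplete request outside the normal
 * submission path. live_suppress_wait_preempt() points each timeline's
 * last_request at this dummy so that the NEWCLIENT priority boost is not
 * applied to its spinners; dummy_request_free() then fakes the CS event
 * to let the queued requests proceed.
 */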
static struct i915_request *dummy_request(struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        rq = kzalloc(sizeof(*rq), GFP_KERNEL);
        if (!rq)
                return NULL;

        INIT_LIST_HEAD(&rq->active_list);
        rq->engine = engine;

        i915_sched_node_init(&rq->sched);

        /* mark this request as permanently incomplete */
        rq->fence.seqno = 1;
        BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
        rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
        GEM_BUG_ON(i915_request_completed(rq));

        i915_sw_fence_init(&rq->submit, dummy_notify);
        set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

        return rq;
}

static void dummy_request_free(struct i915_request *dummy)
{
        /* We have to fake the CS interrupt to kick the next request */
        i915_sw_fence_commit(&dummy->submit);

        i915_request_mark_complete(dummy);
        dma_fence_signal(&dummy->fence);

        i915_sched_node_fini(&dummy->sched);
        i915_sw_fence_fini(&dummy->submit);

        dma_fence_free(&dummy->fence);
}

static int live_suppress_wait_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct preempt_client client[4];
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;
        int i;

        /*
         * Waiters are given a little priority nudge, but not enough
         * to actually cause any preemption. Double check that we do
         * not needlessly generate preempt-to-idle cycles.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
                goto err_unlock;
        if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
                goto err_client_0;
        if (preempt_client_init(i915, &client[2])) /* head of queue */
                goto err_client_1;
        if (preempt_client_init(i915, &client[3])) /* bystander */
                goto err_client_2;

        for_each_engine(engine, i915, id) {
                int depth;

                if (!intel_engine_has_preemption(engine))
                        continue;

                if (!engine->emit_init_breadcrumb)
                        continue;

                for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
                        struct i915_request *rq[ARRAY_SIZE(client)];
                        struct i915_request *dummy;

                        engine->execlists.preempt_hang.count = 0;

                        dummy = dummy_request(engine);
                        if (!dummy)
                                goto err_client_3;

                        for (i = 0; i < ARRAY_SIZE(client); i++) {
                                rq[i] = igt_spinner_create_request(&client[i].spin,
                                                                   client[i].ctx, engine,
                                                                   MI_NOOP);
                                if (IS_ERR(rq[i])) {
                                        err = PTR_ERR(rq[i]);
                                        goto err_wedged;
                                }

                                /* Disable NEWCLIENT promotion */
                                __i915_active_request_set(&rq[i]->timeline->last_request,
                                                          dummy);
                                i915_request_add(rq[i]);
                        }

                        dummy_request_free(dummy);

                        GEM_BUG_ON(i915_request_completed(rq[0]));
                        if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
                                pr_err("%s: First client failed to start\n",
                                       engine->name);
                                goto err_wedged;
                        }
                        GEM_BUG_ON(!i915_request_started(rq[0]));

                        if (i915_request_wait(rq[depth],
                                              I915_WAIT_LOCKED |
                                              I915_WAIT_PRIORITY,
                                              1) != -ETIME) {
                                pr_err("%s: Waiter depth:%d completed!\n",
                                       engine->name, depth);
                                goto err_wedged;
                        }

                        for (i = 0; i < ARRAY_SIZE(client); i++)
                                igt_spinner_end(&client[i].spin);

                        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                                goto err_wedged;

                        if (engine->execlists.preempt_hang.count) {
                                pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
                                       engine->name,
                                       engine->execlists.preempt_hang.count,
                                       depth);
                                err = -EINVAL;
                                goto err_client_3;
                        }
                }
        }

        err = 0;
err_client_3:
        preempt_client_fini(&client[3]);
err_client_2:
        preempt_client_fini(&client[2]);
err_client_1:
        preempt_client_fini(&client[1]);
err_client_0:
        preempt_client_fini(&client[0]);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        for (i = 0; i < ARRAY_SIZE(client); i++)
                igt_spinner_end(&client[i].spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_3;
}

static int live_chain_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct preempt_client hi, lo;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        /*
         * Build a chain AB...BA between two contexts (A, B) and request
         * preemption of the last request. It should then complete before
         * the previously submitted spinner in B.
         */

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (preempt_client_init(i915, &hi))
                goto err_unlock;

        if (preempt_client_init(i915, &lo))
                goto err_client_hi;

        for_each_engine(engine, i915, id) {
                struct i915_sched_attr attr = {
                        .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
                };
                struct igt_live_test t;
                struct i915_request *rq;
                int ring_size, count, i;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = igt_spinner_create_request(&lo.spin,
                                                lo.ctx, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq))
                        goto err_wedged;
                i915_request_add(rq);

                ring_size = rq->wa_tail - rq->head;
                if (ring_size < 0)
                        ring_size += rq->ring->size;
                ring_size = rq->ring->size / ring_size;
                pr_debug("%s(%s): Using maximum of %d requests\n",
                         __func__, engine->name, ring_size);

                igt_spinner_end(&lo.spin);
                if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
                        pr_err("Timed out waiting to flush %s\n", engine->name);
                        goto err_wedged;
                }

                if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
                        err = -EIO;
                        goto err_wedged;
                }

                for_each_prime_number_from(count, 1, ring_size) {
                        rq = igt_spinner_create_request(&hi.spin,
                                                        hi.ctx, engine,
                                                        MI_ARB_CHECK);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        if (!igt_wait_for_spinner(&hi.spin, rq))
                                goto err_wedged;

                        rq = igt_spinner_create_request(&lo.spin,
                                                        lo.ctx, engine,
                                                        MI_ARB_CHECK);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);

                        for (i = 0; i < count; i++) {
                                rq = igt_request_alloc(lo.ctx, engine);
                                if (IS_ERR(rq))
                                        goto err_wedged;
                                i915_request_add(rq);
                        }

                        rq = igt_request_alloc(hi.ctx, engine);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        engine->schedule(rq, &attr);

                        igt_spinner_end(&hi.spin);
                        if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
                                struct drm_printer p =
                                        drm_info_printer(i915->drm.dev);

                                pr_err("Failed to preempt over chain of %d\n",
                                       count);
                                intel_engine_dump(engine, &p,
                                                  "%s\n", engine->name);
                                goto err_wedged;
                        }
                        igt_spinner_end(&lo.spin);

                        rq = igt_request_alloc(lo.ctx, engine);
                        if (IS_ERR(rq))
                                goto err_wedged;
                        i915_request_add(rq);
                        if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
                                struct drm_printer p =
                                        drm_info_printer(i915->drm.dev);

                                pr_err("Failed to flush low priority chain of %d requests\n",
                                       count);
                                intel_engine_dump(engine, &p,
                                                  "%s\n", engine->name);
                                goto err_wedged;
                        }
                }

                if (igt_live_test_end(&t)) {
                        err = -EIO;
                        goto err_wedged;
                }
        }

        err = 0;
err_client_lo:
        preempt_client_fini(&lo);
err_client_hi:
        preempt_client_fini(&hi);
err_unlock:
        if (igt_flush_test(i915, I915_WAIT_LOCKED))
                err = -EIO;
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        igt_spinner_end(&hi.spin);
        igt_spinner_end(&lo.spin);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_client_lo;
}

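/*
 * live_preempt_hang: arm execlists.preempt_hang.inject_hang so that the
 * preemption triggered by the high priority spinner deliberately hangs,
 * then reset the engine and check that the high priority spinner still
 * starts once the reset completes.
 */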
static int live_preempt_hang(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct igt_spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        intel_wakeref_t wakeref;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(i915);

        if (igt_spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (igt_spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority =
                I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!igt_wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = igt_spinner_create_request(&spin_hi, ctx_hi, engine,
                                                MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        igt_spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                init_completion(&engine->execlists.preempt_hang.completion);
                engine->execlists.preempt_hang.inject_hang = true;

                i915_request_add(rq);

                if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
                                                 HZ / 10)) {
                        pr_err("Preemption did not occur within timeout!\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                i915_reset_engine(engine, NULL);
                clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

                engine->execlists.preempt_hang.inject_hang = false;

                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                igt_spinner_end(&spin_hi);
                igt_spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        igt_spinner_fini(&spin_lo);
err_spin_hi:
        igt_spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        intel_runtime_pm_put(i915, wakeref);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}

static int random_range(struct rnd_state *rnd, int min, int max)
{
        return i915_prandom_u32_max_state(max - min, rnd) + min;
}

static int random_priority(struct rnd_state *rnd)
{
        return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX);
}

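/*
 * Shared state for the preemption smoketests: a pool of contexts to pick
 * from at random, an optional batch of MI_ARB_CHECKs to run, and the
 * engine/count fields used by each per-engine copy of this struct.
 */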
struct preempt_smoke {
        struct drm_i915_private *i915;
        struct i915_gem_context **contexts;
        struct intel_engine_cs *engine;
        struct drm_i915_gem_object *batch;
        unsigned int ncontext;
        struct rnd_state prng;
        unsigned long count;
};

static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
{
        return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext,
                                                          &smoke->prng)];
}

static int smoke_submit(struct preempt_smoke *smoke,
                        struct i915_gem_context *ctx, int prio,
                        struct drm_i915_gem_object *batch)
{
        struct i915_request *rq;
        struct i915_vma *vma = NULL;
        int err = 0;

        if (batch) {
                vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
                if (IS_ERR(vma))
                        return PTR_ERR(vma);

                err = i915_vma_pin(vma, 0, 0, PIN_USER);
                if (err)
                        return err;
        }

        ctx->sched.priority = prio;

        rq = igt_request_alloc(ctx, smoke->engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto unpin;
        }

        if (vma) {
                err = rq->engine->emit_bb_start(rq,
                                                vma->node.start,
                                                PAGE_SIZE, 0);
                if (!err)
                        err = i915_vma_move_to_active(vma, rq, 0);
        }

        i915_request_add(rq);

unpin:
        if (vma)
                i915_vma_unpin(vma);

        return err;
}

static int smoke_crescendo_thread(void *arg)
{
        struct preempt_smoke *smoke = arg;
        IGT_TIMEOUT(end_time);
        unsigned long count;

        count = 0;
        do {
                struct i915_gem_context *ctx = smoke_context(smoke);
                int err;

                mutex_lock(&smoke->i915->drm.struct_mutex);
                err = smoke_submit(smoke,
                                   ctx, count % I915_PRIORITY_MAX,
                                   smoke->batch);
                mutex_unlock(&smoke->i915->drm.struct_mutex);
                if (err)
                        return err;

                count++;
        } while (!__igt_timeout(end_time, NULL));

        smoke->count = count;
        return 0;
}

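/*
 * smoke_crescendo: spawn one submission thread per engine, each cycling
 * its context priority upwards (count % I915_PRIORITY_MAX) until the
 * timeout expires, then collect the per-engine request counts.
 */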
static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
#define BATCH BIT(0)
{
        struct task_struct *tsk[I915_NUM_ENGINES] = {};
        struct preempt_smoke arg[I915_NUM_ENGINES];
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned long count;
        int err = 0;

        mutex_unlock(&smoke->i915->drm.struct_mutex);

        for_each_engine(engine, smoke->i915, id) {
                arg[id] = *smoke;
                arg[id].engine = engine;
                if (!(flags & BATCH))
                        arg[id].batch = NULL;
                arg[id].count = 0;

                tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
                                      "igt/smoke:%d", id);
                if (IS_ERR(tsk[id])) {
                        err = PTR_ERR(tsk[id]);
                        break;
                }
                get_task_struct(tsk[id]);
        }

        count = 0;
        for_each_engine(engine, smoke->i915, id) {
                int status;

                if (IS_ERR_OR_NULL(tsk[id]))
                        continue;

                status = kthread_stop(tsk[id]);
                if (status && !err)
                        err = status;

                count += arg[id].count;

                put_task_struct(tsk[id]);
        }

        mutex_lock(&smoke->i915->drm.struct_mutex);

        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                count, flags,
                RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
        return err;
}

static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
{
        enum intel_engine_id id;
        IGT_TIMEOUT(end_time);
        unsigned long count;

        count = 0;
        do {
                for_each_engine(smoke->engine, smoke->i915, id) {
                        struct i915_gem_context *ctx = smoke_context(smoke);
                        int err;

                        err = smoke_submit(smoke,
                                           ctx, random_priority(&smoke->prng),
                                           flags & BATCH ? smoke->batch : NULL);
                        if (err)
                                return err;

                        count++;
                }
        } while (!__igt_timeout(end_time, NULL));

        pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
                count, flags,
                RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
        return 0;
}

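/*
 * live_preempt_smoke: fill a batch with MI_ARB_CHECKs (so it can be
 * preempted at arbitration points), create a large pool of contexts and
 * hammer the scheduler with the crescendo and random submission phases,
 * both with and without the batch attached.
 */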
static int live_preempt_smoke(void *arg)
{
        struct preempt_smoke smoke = {
                .i915 = arg,
                .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
                .ncontext = 1024,
        };
        const unsigned int phase[] = { 0, BATCH };
        intel_wakeref_t wakeref;
        struct igt_live_test t;
        int err = -ENOMEM;
        u32 *cs;
        int n;

        if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
                return 0;

        smoke.contexts = kmalloc_array(smoke.ncontext,
                                       sizeof(*smoke.contexts),
                                       GFP_KERNEL);
        if (!smoke.contexts)
                return -ENOMEM;

        mutex_lock(&smoke.i915->drm.struct_mutex);
        wakeref = intel_runtime_pm_get(smoke.i915);

        smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
        if (IS_ERR(smoke.batch)) {
                err = PTR_ERR(smoke.batch);
                goto err_unlock;
        }

        cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
        }
        for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
                cs[n] = MI_ARB_CHECK;
        cs[n] = MI_BATCH_BUFFER_END;
        i915_gem_object_flush_map(smoke.batch);
        i915_gem_object_unpin_map(smoke.batch);

        if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
                err = -EIO;
                goto err_batch;
        }

        for (n = 0; n < smoke.ncontext; n++) {
                smoke.contexts[n] = kernel_context(smoke.i915);
                if (!smoke.contexts[n])
                        goto err_ctx;
        }

        for (n = 0; n < ARRAY_SIZE(phase); n++) {
                err = smoke_crescendo(&smoke, phase[n]);
                if (err)
                        goto err_ctx;

                err = smoke_random(&smoke, phase[n]);
                if (err)
                        goto err_ctx;
        }

err_ctx:
        if (igt_live_test_end(&t))
                err = -EIO;

        for (n = 0; n < smoke.ncontext; n++) {
                if (!smoke.contexts[n])
                        break;
                kernel_context_close(smoke.contexts[n]);
        }

err_batch:
        i915_gem_object_put(smoke.batch);
err_unlock:
        intel_runtime_pm_put(smoke.i915, wakeref);
        mutex_unlock(&smoke.i915->drm.struct_mutex);
        kfree(smoke.contexts);

        return err;
}

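/*
 * Entry point for the execlists live selftests; these only run when
 * execlists submission is in use and the GPU is not terminally wedged.
 */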
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_busywait_preempt),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
                SUBTEST(live_suppress_self_preempt),
                SUBTEST(live_suppress_wait_preempt),
                SUBTEST(live_chain_preempt),
                SUBTEST(live_preempt_hang),
                SUBTEST(live_preempt_smoke),
        };

        if (!HAS_EXECLISTS(i915))
                return 0;

        if (i915_terminally_wedged(i915))
                return 0;

        return i915_subtests(tests, i915);
}