drivers/gpu/drm/i915/i915_request.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/dma-fence-array.h>
26 #include <linux/irq_work.h>
27 #include <linux/prefetch.h>
28 #include <linux/sched.h>
29 #include <linux/sched/clock.h>
30 #include <linux/sched/signal.h>
31
32 #include "i915_active.h"
33 #include "i915_drv.h"
34 #include "i915_globals.h"
35 #include "intel_pm.h"
36
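/*
 * Node on a signaling request's execute_cb list: once that request is
 * submitted to the hardware, the irq_work completes the attached submit
 * fence from irq context (see __notify_execute_cb() and irq_execute_cb()).
 */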
37 struct execute_cb {
38         struct list_head link;
39         struct irq_work work;
40         struct i915_sw_fence *fence;
41 };
42
43 static struct i915_global_request {
44         struct i915_global base;
45         struct kmem_cache *slab_requests;
46         struct kmem_cache *slab_dependencies;
47         struct kmem_cache *slab_execute_cbs;
48 } global;
49
50 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
51 {
52         return "i915";
53 }
54
55 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
56 {
57         /*
58          * The timeline struct (as part of the ppgtt underneath a context)
59          * may be freed when the request is no longer in use by the GPU.
60          * We could extend the life of a context to beyond that of all
61          * fences, possibly keeping the hw resource around indefinitely,
62          * or we just give them a false name. Since
63          * dma_fence_ops.get_timeline_name is a debug feature, the occasional
64          * lie seems justifiable.
65          */
66         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
67                 return "signaled";
68
69         return to_request(fence)->gem_context->name ?: "[i915]";
70 }
71
72 static bool i915_fence_signaled(struct dma_fence *fence)
73 {
74         return i915_request_completed(to_request(fence));
75 }
76
77 static bool i915_fence_enable_signaling(struct dma_fence *fence)
78 {
79         return i915_request_enable_breadcrumb(to_request(fence));
80 }
81
82 static signed long i915_fence_wait(struct dma_fence *fence,
83                                    bool interruptible,
84                                    signed long timeout)
85 {
86         return i915_request_wait(to_request(fence),
87                                  interruptible | I915_WAIT_PRIORITY,
88                                  timeout);
89 }
90
91 static void i915_fence_release(struct dma_fence *fence)
92 {
93         struct i915_request *rq = to_request(fence);
94
95         /*
96          * The request is put onto an RCU freelist (i.e. the address
97          * is immediately reused), mark the fences as being freed now.
98          * Otherwise the debugobjects for the fences are only marked as
99          * freed when the slab cache itself is freed, and so we would get
100          * caught trying to reuse dead objects.
101          */
102         i915_sw_fence_fini(&rq->submit);
103         i915_sw_fence_fini(&rq->semaphore);
104
105         kmem_cache_free(global.slab_requests, rq);
106 }
107
108 const struct dma_fence_ops i915_fence_ops = {
109         .get_driver_name = i915_fence_get_driver_name,
110         .get_timeline_name = i915_fence_get_timeline_name,
111         .enable_signaling = i915_fence_enable_signaling,
112         .signaled = i915_fence_signaled,
113         .wait = i915_fence_wait,
114         .release = i915_fence_release,
115 };
116
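/*
 * Unlink the request from the client (drm file) that submitted it,
 * rechecking request->file_priv under the client's lock in case it has
 * already been removed concurrently.
 */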
117 static inline void
118 i915_request_remove_from_client(struct i915_request *request)
119 {
120         struct drm_i915_file_private *file_priv;
121
122         file_priv = request->file_priv;
123         if (!file_priv)
124                 return;
125
126         spin_lock(&file_priv->mm.lock);
127         if (request->file_priv) {
128                 list_del(&request->client_link);
129                 request->file_priv = NULL;
130         }
131         spin_unlock(&file_priv->mm.lock);
132 }
133
134 static void advance_ring(struct i915_request *request)
135 {
136         struct intel_ring *ring = request->ring;
137         unsigned int tail;
138
139         /*
140          * We know the GPU must have read the request to have
141          * sent us the seqno + interrupt, so use the position
142          * of the tail of the request to update the last known position
143          * of the GPU head.
144          *
145          * Note this requires that we are always called in request
146          * completion order.
147          */
148         GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
149         if (list_is_last(&request->ring_link, &ring->request_list)) {
150                 /*
151                  * We may race here with execlists resubmitting this request
152                  * as we retire it. The resubmission will move the ring->tail
153                  * forwards (to request->wa_tail). We either read the
154                  * current value that was written to hw, or the value that
155                  * is just about to be. Either works, if we miss the last two
156                  * noops - they are safe to be replayed on a reset.
157                  */
158                 tail = READ_ONCE(request->tail);
159                 list_del(&ring->active_link);
160         } else {
161                 tail = request->postfix;
162         }
163         list_del_init(&request->ring_link);
164
165         ring->head = tail;
166 }
167
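/* Free the singly linked list of capture requests attached to this request. */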
168 static void free_capture_list(struct i915_request *request)
169 {
170         struct i915_capture_list *capture;
171
172         capture = request->capture_list;
173         while (capture) {
174                 struct i915_capture_list *next = capture->next;
175
176                 kfree(capture);
177                 capture = next;
178         }
179 }
180
181 static void __retire_engine_request(struct intel_engine_cs *engine,
182                                     struct i915_request *rq)
183 {
184         GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
185                   __func__, engine->name,
186                   rq->fence.context, rq->fence.seqno,
187                   hwsp_seqno(rq));
188
189         GEM_BUG_ON(!i915_request_completed(rq));
190
191         local_irq_disable();
192
193         spin_lock(&engine->timeline.lock);
194         GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
195         list_del_init(&rq->link);
196         spin_unlock(&engine->timeline.lock);
197
198         spin_lock(&rq->lock);
199         i915_request_mark_complete(rq);
200         if (!i915_request_signaled(rq))
201                 dma_fence_signal_locked(&rq->fence);
202         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
203                 i915_request_cancel_breadcrumb(rq);
204         if (rq->waitboost) {
205                 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
206                 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
207         }
208         spin_unlock(&rq->lock);
209
210         local_irq_enable();
211
212         /*
213          * The backing object for the context is done after switching to the
214          * *next* context. Therefore we cannot retire the previous context until
215          * the next context has already started running. However, since we
216          * cannot take the required locks at i915_request_submit() we
217          * defer the unpinning of the active context to now, retirement of
218          * the subsequent request.
219          */
220         if (engine->last_retired_context)
221                 intel_context_unpin(engine->last_retired_context);
222         engine->last_retired_context = rq->hw_context;
223 }
224
225 static void __retire_engine_upto(struct intel_engine_cs *engine,
226                                  struct i915_request *rq)
227 {
228         struct i915_request *tmp;
229
230         if (list_empty(&rq->link))
231                 return;
232
233         do {
234                 tmp = list_first_entry(&engine->timeline.requests,
235                                        typeof(*tmp), link);
236
237                 GEM_BUG_ON(tmp->engine != engine);
238                 __retire_engine_request(engine, tmp);
239         } while (tmp != rq);
240 }
241
242 static void i915_request_retire(struct i915_request *request)
243 {
244         struct i915_active_request *active, *next;
245
246         GEM_TRACE("%s fence %llx:%lld, current %d\n",
247                   request->engine->name,
248                   request->fence.context, request->fence.seqno,
249                   hwsp_seqno(request));
250
251         lockdep_assert_held(&request->i915->drm.struct_mutex);
252         GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
253         GEM_BUG_ON(!i915_request_completed(request));
254
255         trace_i915_request_retire(request);
256
257         advance_ring(request);
258         free_capture_list(request);
259
260         /*
261          * Walk through the active list, calling retire on each. This allows
262          * objects to track their GPU activity and mark themselves as idle
263          * when their *last* active request is completed (updating state
264          * tracking lists for eviction, active references for GEM, etc).
265          *
266          * As the ->retire() may free the node, we decouple it first and
267          * pass along the auxiliary information (to avoid dereferencing
268          * the node after the callback).
269          */
270         list_for_each_entry_safe(active, next, &request->active_list, link) {
271                 /*
272                  * In microbenchmarks, or when focusing upon time inside the kernel,
273                  * we may spend an inordinate amount of time simply handling
274                  * the retirement of requests and processing their callbacks.
275                  * In particular, this loop is hot due to the
276                  * cache misses when jumping around the list of
277                  * i915_active_request.  So we try to keep this loop as
278                  * streamlined as possible and also prefetch the next
279                  * i915_active_request to try and hide the likely cache miss.
280                  */
281                 prefetchw(next);
282
283                 INIT_LIST_HEAD(&active->link);
284                 RCU_INIT_POINTER(active->request, NULL);
285
286                 active->retire(active, request);
287         }
288
289         i915_request_remove_from_client(request);
290
291         __retire_engine_upto(request->engine, request);
292
293         intel_context_exit(request->hw_context);
294         intel_context_unpin(request->hw_context);
295
296         i915_sched_node_fini(&request->sched);
297         i915_request_put(request);
298 }
299
300 void i915_request_retire_upto(struct i915_request *rq)
301 {
302         struct intel_ring *ring = rq->ring;
303         struct i915_request *tmp;
304
305         GEM_TRACE("%s fence %llx:%lld, current %d\n",
306                   rq->engine->name,
307                   rq->fence.context, rq->fence.seqno,
308                   hwsp_seqno(rq));
309
310         lockdep_assert_held(&rq->i915->drm.struct_mutex);
311         GEM_BUG_ON(!i915_request_completed(rq));
312
313         if (list_empty(&rq->ring_link))
314                 return;
315
316         do {
317                 tmp = list_first_entry(&ring->request_list,
318                                        typeof(*tmp), ring_link);
319
320                 i915_request_retire(tmp);
321         } while (tmp != rq);
322 }
323
324 static void irq_execute_cb(struct irq_work *wrk)
325 {
326         struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
327
328         i915_sw_fence_complete(cb->fence);
329         kmem_cache_free(global.slab_execute_cbs, cb);
330 }
331
332 static void __notify_execute_cb(struct i915_request *rq)
333 {
334         struct execute_cb *cb;
335
336         lockdep_assert_held(&rq->lock);
337
338         if (list_empty(&rq->execute_cb))
339                 return;
340
341         list_for_each_entry(cb, &rq->execute_cb, link)
342                 irq_work_queue(&cb->work);
343
344         /*
345          * XXX Rollback on __i915_request_unsubmit()
346          *
347          * In the future, perhaps when we have an active time-slicing scheduler,
348          * it will be interesting to unsubmit parallel execution and remove
349          * busywaits from the GPU until their master is restarted. This is
350          * quite hairy, we have to carefully rollback the fence and do a
351          * preempt-to-idle cycle on the target engine, all the while the
352          * master execute_cb may refire.
353          */
354         INIT_LIST_HEAD(&rq->execute_cb);
355 }
356
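/*
 * Keep rq's submit fence from completing until @signal has actually been
 * submitted to the hardware: take an extra reference on the submit fence and
 * register an execute_cb on @signal to release it (or release it immediately
 * if @signal is already active).
 */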
357 static int
358 i915_request_await_execution(struct i915_request *rq,
359                              struct i915_request *signal,
360                              gfp_t gfp)
361 {
362         struct execute_cb *cb;
363
364         if (i915_request_is_active(signal))
365                 return 0;
366
367         cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
368         if (!cb)
369                 return -ENOMEM;
370
371         cb->fence = &rq->submit;
372         i915_sw_fence_await(cb->fence);
373         init_irq_work(&cb->work, irq_execute_cb);
374
375         spin_lock_irq(&signal->lock);
376         if (i915_request_is_active(signal)) {
377                 i915_sw_fence_complete(cb->fence);
378                 kmem_cache_free(global.slab_execute_cbs, cb);
379         } else {
380                 list_add_tail(&cb->link, &signal->execute_cb);
381         }
382         spin_unlock_irq(&signal->lock);
383
384         return 0;
385 }
386
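/*
 * Move the request onto @timeline->requests: used to transfer a request
 * between its per-context timeline and the engine's global timeline on
 * (un)submission. The engine timeline lock must already be held.
 */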
387 static void move_to_timeline(struct i915_request *request,
388                              struct i915_timeline *timeline)
389 {
390         GEM_BUG_ON(request->timeline == &request->engine->timeline);
391         lockdep_assert_held(&request->engine->timeline.lock);
392
393         spin_lock(&request->timeline->lock);
394         list_move_tail(&request->link, &timeline->requests);
395         spin_unlock(&request->timeline->lock);
396 }
397
398 void __i915_request_submit(struct i915_request *request)
399 {
400         struct intel_engine_cs *engine = request->engine;
401
402         GEM_TRACE("%s fence %llx:%lld -> current %d\n",
403                   engine->name,
404                   request->fence.context, request->fence.seqno,
405                   hwsp_seqno(request));
406
407         GEM_BUG_ON(!irqs_disabled());
408         lockdep_assert_held(&engine->timeline.lock);
409
410         if (i915_gem_context_is_banned(request->gem_context))
411                 i915_request_skip(request, -EIO);
412
413         /*
414          * Are we using semaphores when the gpu is already saturated?
415          *
416          * Using semaphores incurs a cost in having the GPU poll a
417          * memory location, busywaiting for it to change. The continual
418          * memory reads can have a noticeable impact on the rest of the
419          * system with the extra bus traffic, stalling the cpu as it too
420          * tries to access memory across the bus (perf stat -e bus-cycles).
421          *
422          * If we installed a semaphore on this request and we only submit
423          * the request after the signaler completed, that indicates the
424          * system is overloaded and using semaphores at this time only
425          * increases the amount of work we are doing. If so, we disable
426          * further use of semaphores until we are idle again, whence we
427          * optimistically try again.
428          */
429         if (request->sched.semaphores &&
430             i915_sw_fence_signaled(&request->semaphore))
431                 request->hw_context->saturated |= request->sched.semaphores;
432
433         /* We may be recursing from the signal callback of another i915 fence */
434         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
435
436         GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
437         set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
438
439         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
440             !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
441             !i915_request_enable_breadcrumb(request))
442                 intel_engine_queue_breadcrumbs(engine);
443
444         __notify_execute_cb(request);
445
446         spin_unlock(&request->lock);
447
448         engine->emit_fini_breadcrumb(request,
449                                      request->ring->vaddr + request->postfix);
450
451         /* Transfer from per-context onto the global per-engine timeline */
452         move_to_timeline(request, &engine->timeline);
453
454         engine->serial++;
455
456         trace_i915_request_execute(request);
457 }
458
459 void i915_request_submit(struct i915_request *request)
460 {
461         struct intel_engine_cs *engine = request->engine;
462         unsigned long flags;
463
464         /* Will be called from irq-context when using foreign fences. */
465         spin_lock_irqsave(&engine->timeline.lock, flags);
466
467         __i915_request_submit(request);
468
469         spin_unlock_irqrestore(&engine->timeline.lock, flags);
470 }
471
472 void __i915_request_unsubmit(struct i915_request *request)
473 {
474         struct intel_engine_cs *engine = request->engine;
475
476         GEM_TRACE("%s fence %llx:%lld, current %d\n",
477                   engine->name,
478                   request->fence.context, request->fence.seqno,
479                   hwsp_seqno(request));
480
481         GEM_BUG_ON(!irqs_disabled());
482         lockdep_assert_held(&engine->timeline.lock);
483
484         /*
485          * Only unwind in reverse order, required so that the per-context list
486          * is kept in seqno/ring order.
487          */
488
489         /* We may be recursing from the signal callback of another i915 fence */
490         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
491
492         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
493                 i915_request_cancel_breadcrumb(request);
494
495         GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
496         clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
497
498         spin_unlock(&request->lock);
499
500         /* Transfer back from the global per-engine timeline to per-context */
501         move_to_timeline(request, request->timeline);
502
503         /* We've already spun, don't charge on resubmitting. */
504         if (request->sched.semaphores && i915_request_started(request)) {
505                 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
506                 request->sched.semaphores = 0;
507         }
508
509         /*
510          * We don't need to wake_up any waiters on request->execute, they
511          * will get woken by any other event or us re-adding this request
512          * to the engine timeline (__i915_request_submit()). The waiters
513          * should be quite adept at finding that the request now has a new
514          * global_seqno compared to the one they went to sleep on.
515          */
516 }
517
518 void i915_request_unsubmit(struct i915_request *request)
519 {
520         struct intel_engine_cs *engine = request->engine;
521         unsigned long flags;
522
523         /* Will be called from irq-context when using foreign fences. */
524         spin_lock_irqsave(&engine->timeline.lock, flags);
525
526         __i915_request_unsubmit(request);
527
528         spin_unlock_irqrestore(&engine->timeline.lock, flags);
529 }
530
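/*
 * Notification callback for the submit fence: once all dependencies have
 * signaled (FENCE_COMPLETE), hand the request to the engine backend for
 * submission; drop the fence's reference to the request on FENCE_FREE.
 */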
531 static int __i915_sw_fence_call
532 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
533 {
534         struct i915_request *request =
535                 container_of(fence, typeof(*request), submit);
536
537         switch (state) {
538         case FENCE_COMPLETE:
539                 trace_i915_request_submit(request);
540                 /*
541                  * We need to serialize use of the submit_request() callback
542                  * with its hotplugging performed during an emergency
543                  * i915_gem_set_wedged().  We use the RCU mechanism to mark the
544                  * critical section in order to force i915_gem_set_wedged() to
545                  * wait until the submit_request() is completed before
546                  * proceeding.
547                  */
548                 rcu_read_lock();
549                 request->engine->submit_request(request);
550                 rcu_read_unlock();
551                 break;
552
553         case FENCE_FREE:
554                 i915_request_put(request);
555                 break;
556         }
557
558         return NOTIFY_DONE;
559 }
560
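/*
 * Notification callback for the semaphore fence: once the semaphore
 * dependencies have signaled (FENCE_COMPLETE), bump the request's priority
 * with I915_PRIORITY_NOSEMAPHORE; drop the reference on FENCE_FREE.
 */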
561 static int __i915_sw_fence_call
562 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
563 {
564         struct i915_request *request =
565                 container_of(fence, typeof(*request), semaphore);
566
567         switch (state) {
568         case FENCE_COMPLETE:
569                 i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
570                 break;
571
572         case FENCE_FREE:
573                 i915_request_put(request);
574                 break;
575         }
576
577         return NOTIFY_DONE;
578 }
579
580 static void ring_retire_requests(struct intel_ring *ring)
581 {
582         struct i915_request *rq, *rn;
583
584         list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
585                 if (!i915_request_completed(rq))
586                         break;
587
588                 i915_request_retire(rq);
589         }
590 }
591
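/*
 * Slow path for request allocation: if we may block, wait out the RCU grace
 * period recorded by the most recent request on this ring and retire any
 * completed requests, in the hope of returning memory to the slab, before
 * retrying the allocation.
 */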
592 static noinline struct i915_request *
593 request_alloc_slow(struct intel_context *ce, gfp_t gfp)
594 {
595         struct intel_ring *ring = ce->ring;
596         struct i915_request *rq;
597
598         if (list_empty(&ring->request_list))
599                 goto out;
600
601         if (!gfpflags_allow_blocking(gfp))
602                 goto out;
603
604         /* Ratelimit ourselves to prevent oom from malicious clients */
605         rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
606         cond_synchronize_rcu(rq->rcustate);
607
608         /* Retire our old requests in the hope that we free some */
609         ring_retire_requests(ring);
610
611 out:
612         return kmem_cache_alloc(global.slab_requests, gfp);
613 }
614
615 struct i915_request *
616 __i915_request_create(struct intel_context *ce, gfp_t gfp)
617 {
618         struct i915_timeline *tl = ce->ring->timeline;
619         struct i915_request *rq;
620         u32 seqno;
621         int ret;
622
623         might_sleep_if(gfpflags_allow_blocking(gfp));
624
625         /* Check that the caller provided an already pinned context */
626         __intel_context_pin(ce);
627
628         /*
629          * Beware: Dragons be flying overhead.
630          *
631          * We use RCU to look up requests in flight. The lookups may
632          * race with the request being allocated from the slab freelist.
633          * That is, the request we are writing to here may be in the process
634          * of being read by __i915_active_request_get_rcu(). As such,
635          * we have to be very careful when overwriting the contents. During
636          * the RCU lookup, we chase the request->engine pointer,
637          * read the request->global_seqno and increment the reference count.
638          *
639          * The reference count is incremented atomically. If it is zero,
640          * the lookup knows the request is unallocated and complete. Otherwise,
641          * it is either still in use, or has been reallocated and reset
642          * with dma_fence_init(). This increment is safe for release as we
643          * check that the request we have a reference to matches the active
644          * request.
645          *
646          * Before we increment the refcount, we chase the request->engine
647          * pointer. We must not call kmem_cache_zalloc() or else we set
648          * that pointer to NULL and cause a crash during the lookup. If
649          * we see the request is completed (based on the value of the
650          * old engine and seqno), the lookup is complete and reports NULL.
651          * If we decide the request is not completed (new engine or seqno),
652          * then we grab a reference and double check that it is still the
653          * active request - which it won't be, and restart the lookup.
654          *
655          * Do not use kmem_cache_zalloc() here!
656          */
657         rq = kmem_cache_alloc(global.slab_requests,
658                               gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
659         if (unlikely(!rq)) {
660                 rq = request_alloc_slow(ce, gfp);
661                 if (!rq) {
662                         ret = -ENOMEM;
663                         goto err_unreserve;
664                 }
665         }
666
667         ret = i915_timeline_get_seqno(tl, rq, &seqno);
668         if (ret)
669                 goto err_free;
670
671         rq->i915 = ce->engine->i915;
672         rq->hw_context = ce;
673         rq->gem_context = ce->gem_context;
674         rq->engine = ce->engine;
675         rq->ring = ce->ring;
676         rq->timeline = tl;
677         GEM_BUG_ON(rq->timeline == &ce->engine->timeline);
678         rq->hwsp_seqno = tl->hwsp_seqno;
679         rq->hwsp_cacheline = tl->hwsp_cacheline;
680         rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
681
682         spin_lock_init(&rq->lock);
683         dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
684                        tl->fence_context, seqno);
685
686         /* We bump the ref for the fence chain */
687         i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
688         i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
689
690         i915_sched_node_init(&rq->sched);
691
692         /* No zalloc, must clear what we need by hand */
693         rq->file_priv = NULL;
694         rq->batch = NULL;
695         rq->capture_list = NULL;
696         rq->waitboost = false;
697         rq->execution_mask = ALL_ENGINES;
698
699         INIT_LIST_HEAD(&rq->active_list);
700         INIT_LIST_HEAD(&rq->execute_cb);
701
702         /*
703          * Reserve space in the ring buffer for all the commands required to
704          * eventually emit this request. This is to guarantee that the
705          * i915_request_add() call can't fail. Note that the reserve may need
706          * to be redone if the request is not actually submitted straight
707          * away, e.g. because a GPU scheduler has deferred it.
708          *
709          * Note that due to how we add reserved_space to intel_ring_begin()
710          * we need to double our reserved space to ensure that if we need to wrap
711          * around inside i915_request_add() there is sufficient space at
712          * the beginning of the ring as well.
713          */
714         rq->reserved_space =
715                 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
716
717         /*
718          * Record the position of the start of the request so that
719          * should we detect the updated seqno part-way through the
720          * GPU processing the request, we never over-estimate the
721          * position of the head.
722          */
723         rq->head = rq->ring->emit;
724
725         ret = rq->engine->request_alloc(rq);
726         if (ret)
727                 goto err_unwind;
728
729         rq->infix = rq->ring->emit; /* end of header; start of user payload */
730
731         /* Keep a second pin for the dual retirement along engine and ring */
732         __intel_context_pin(ce);
733
734         intel_context_mark_active(ce);
735         return rq;
736
737 err_unwind:
738         ce->ring->emit = rq->head;
739
740         /* Make sure we didn't add ourselves to external state before freeing */
741         GEM_BUG_ON(!list_empty(&rq->active_list));
742         GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
743         GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
744
745 err_free:
746         kmem_cache_free(global.slab_requests, rq);
747 err_unreserve:
748         intel_context_unpin(ce);
749         return ERR_PTR(ret);
750 }
751
752 struct i915_request *
753 i915_request_create(struct intel_context *ce)
754 {
755         struct i915_request *rq;
756
757         intel_context_timeline_lock(ce);
758
759         /* Move our oldest request to the slab-cache (if not in use!) */
760         rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
761         if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
762             i915_request_completed(rq))
763                 i915_request_retire(rq);
764
765         intel_context_enter(ce);
766         rq = __i915_request_create(ce, GFP_KERNEL);
767         intel_context_exit(ce); /* active reference transferred to request */
768         if (IS_ERR(rq))
769                 goto err_unlock;
770
771         /* Check that we do not interrupt ourselves with a new request */
772         rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
773
774         return rq;
775
776 err_unlock:
777         intel_context_timeline_unlock(ce);
778         return rq;
779 }
780
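/*
 * Do not allow @rq to be submitted before @signal has started: wait upon the
 * request preceding @signal on its ring (if any, and if we have not already
 * synchronised with it via the timeline).
 */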
781 static int
782 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
783 {
784         if (list_is_first(&signal->ring_link, &signal->ring->request_list))
785                 return 0;
786
787         signal = list_prev_entry(signal, ring_link);
788         if (i915_timeline_sync_is_later(rq->timeline, &signal->fence))
789                 return 0;
790
791         return i915_sw_fence_await_dma_fence(&rq->submit,
792                                              &signal->fence, 0,
793                                              I915_FENCE_GFP);
794 }
795
796 static intel_engine_mask_t
797 already_busywaiting(struct i915_request *rq)
798 {
799         /*
800          * Polling a semaphore causes bus traffic, delaying other users of
801          * both the GPU and CPU. We want to limit the impact on others,
802          * while taking advantage of early submission to reduce GPU
803          * latency. Therefore we restrict ourselves to not using more
804          * than one semaphore from each source, and not using a semaphore
805          * if we have detected the engine is saturated (i.e. would not be
806          * submitted early and cause bus traffic reading an already passed
807          * semaphore).
808          *
809          * See the are-we-too-late? check in __i915_request_submit().
810          */
811         return rq->sched.semaphores | rq->hw_context->saturated;
812 }
813
814 static int
815 emit_semaphore_wait(struct i915_request *to,
816                     struct i915_request *from,
817                     gfp_t gfp)
818 {
819         u32 hwsp_offset;
820         u32 *cs;
821         int err;
822
823         GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
824         GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
825
826         /* Just emit the first semaphore we see as request space is limited. */
827         if (already_busywaiting(to) & from->engine->mask)
828                 return i915_sw_fence_await_dma_fence(&to->submit,
829                                                      &from->fence, 0,
830                                                      I915_FENCE_GFP);
831
832         err = i915_request_await_start(to, from);
833         if (err < 0)
834                 return err;
835
836         /* Only submit our spinner after the signaler is running! */
837         err = i915_request_await_execution(to, from, gfp);
838         if (err)
839                 return err;
840
841         /* We need to pin the signaler's HWSP until we are finished reading. */
842         err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
843         if (err)
844                 return err;
845
846         cs = intel_ring_begin(to, 4);
847         if (IS_ERR(cs))
848                 return PTR_ERR(cs);
849
850         /*
851          * Using greater-than-or-equal here means we have to worry
852          * about seqno wraparound. To side step that issue, we swap
853          * the timeline HWSP upon wrapping, so that everyone listening
854          * for the old (pre-wrap) values does not see the much smaller
855          * (post-wrap) values than they were expecting (and so wait
856          * forever).
857          */
858         *cs++ = MI_SEMAPHORE_WAIT |
859                 MI_SEMAPHORE_GLOBAL_GTT |
860                 MI_SEMAPHORE_POLL |
861                 MI_SEMAPHORE_SAD_GTE_SDD;
862         *cs++ = from->fence.seqno;
863         *cs++ = hwsp_offset;
864         *cs++ = 0;
865
866         intel_ring_advance(to, cs);
867         to->sched.semaphores |= from->engine->mask;
868         to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
869         return 0;
870 }
871
872 static int
873 i915_request_await_request(struct i915_request *to, struct i915_request *from)
874 {
875         int ret;
876
877         GEM_BUG_ON(to == from);
878         GEM_BUG_ON(to->timeline == from->timeline);
879
880         if (i915_request_completed(from))
881                 return 0;
882
883         if (to->engine->schedule) {
884                 ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
885                 if (ret < 0)
886                         return ret;
887         }
888
889         if (to->engine == from->engine) {
890                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
891                                                        &from->submit,
892                                                        I915_FENCE_GFP);
893         } else if (intel_engine_has_semaphores(to->engine) &&
894                    to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
895                 ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
896         } else {
897                 ret = i915_sw_fence_await_dma_fence(&to->submit,
898                                                     &from->fence, 0,
899                                                     I915_FENCE_GFP);
900         }
901         if (ret < 0)
902                 return ret;
903
904         if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
905                 ret = i915_sw_fence_await_dma_fence(&to->semaphore,
906                                                     &from->fence, 0,
907                                                     I915_FENCE_GFP);
908                 if (ret < 0)
909                         return ret;
910         }
911
912         return 0;
913 }
914
915 int
916 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
917 {
918         struct dma_fence **child = &fence;
919         unsigned int nchild = 1;
920         int ret;
921
922         /*
923          * Note that if the fence-array was created in signal-on-any mode,
924          * we should *not* decompose it into its individual fences. However,
925          * we don't currently store which mode the fence-array is operating
926          * in. Fortunately, the only user of signal-on-any is private to
927          * amdgpu and we should not see any incoming fence-array from
928          * sync-file being in signal-on-any mode.
929          */
930         if (dma_fence_is_array(fence)) {
931                 struct dma_fence_array *array = to_dma_fence_array(fence);
932
933                 child = array->fences;
934                 nchild = array->num_fences;
935                 GEM_BUG_ON(!nchild);
936         }
937
938         do {
939                 fence = *child++;
940                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
941                         continue;
942
943                 /*
944                  * Requests on the same timeline are explicitly ordered, along
945                  * with their dependencies, by i915_request_add() which ensures
946                  * that requests are submitted in-order through each ring.
947                  */
948                 if (fence->context == rq->fence.context)
949                         continue;
950
951                 /* Squash repeated waits to the same timelines */
952                 if (fence->context != rq->i915->mm.unordered_timeline &&
953                     i915_timeline_sync_is_later(rq->timeline, fence))
954                         continue;
955
956                 if (dma_fence_is_i915(fence))
957                         ret = i915_request_await_request(rq, to_request(fence));
958                 else
959                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
960                                                             I915_FENCE_TIMEOUT,
961                                                             I915_FENCE_GFP);
962                 if (ret < 0)
963                         return ret;
964
965                 /* Record the latest fence used against each timeline */
966                 if (fence->context != rq->i915->mm.unordered_timeline)
967                         i915_timeline_sync_set(rq->timeline, fence);
968         } while (--nchild);
969
970         return 0;
971 }
972
973 /**
974  * i915_request_await_object - set this request to (async) wait upon a bo
975  * @to: request we are wishing to use
976  * @obj: object which may be in use on another ring.
977  * @write: whether the wait is on behalf of a writer
978  *
979  * This code is meant to abstract object synchronization with the GPU.
980  * Conceptually we serialise writes between engines inside the GPU.
981  * We only allow one engine to write into a buffer at any time, but
982  * multiple readers. To ensure each has a coherent view of memory, we must:
983  *
984  * - If there is an outstanding write request to the object, the new
985  *   request must wait for it to complete (either CPU or in hw, requests
986  *   on the same ring will be naturally ordered).
987  *
988  * - If we are a write request (pending_write_domain is set), the new
989  *   request must wait for outstanding read requests to complete.
990  *
991  * Returns 0 if successful, else propagates up the lower layer error.
992  */
993 int
994 i915_request_await_object(struct i915_request *to,
995                           struct drm_i915_gem_object *obj,
996                           bool write)
997 {
998         struct dma_fence *excl;
999         int ret = 0;
1000
1001         if (write) {
1002                 struct dma_fence **shared;
1003                 unsigned int count, i;
1004
1005                 ret = reservation_object_get_fences_rcu(obj->resv,
1006                                                         &excl, &count, &shared);
1007                 if (ret)
1008                         return ret;
1009
1010                 for (i = 0; i < count; i++) {
1011                         ret = i915_request_await_dma_fence(to, shared[i]);
1012                         if (ret)
1013                                 break;
1014
1015                         dma_fence_put(shared[i]);
1016                 }
1017
1018                 for (; i < count; i++)
1019                         dma_fence_put(shared[i]);
1020                 kfree(shared);
1021         } else {
1022                 excl = reservation_object_get_excl_rcu(obj->resv);
1023         }
1024
1025         if (excl) {
1026                 if (ret == 0)
1027                         ret = i915_request_await_dma_fence(to, excl);
1028
1029                 dma_fence_put(excl);
1030         }
1031
1032         return ret;
1033 }
1034
1035 void i915_request_skip(struct i915_request *rq, int error)
1036 {
1037         void *vaddr = rq->ring->vaddr;
1038         u32 head;
1039
1040         GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1041         dma_fence_set_error(&rq->fence, error);
1042
1043         /*
1044          * As this request likely depends on state from the lost
1045          * context, clear out all the user operations leaving the
1046          * breadcrumb at the end (so we get the fence notifications).
1047          */
1048         head = rq->infix;
1049         if (rq->postfix < head) {
1050                 memset(vaddr + head, 0, rq->ring->size - head);
1051                 head = 0;
1052         }
1053         memset(vaddr + head, 0, rq->postfix - head);
1054 }
1055
1056 static struct i915_request *
1057 __i915_request_add_to_timeline(struct i915_request *rq)
1058 {
1059         struct i915_timeline *timeline = rq->timeline;
1060         struct i915_request *prev;
1061
1062         /*
1063          * Dependency tracking and request ordering along the timeline
1064          * is special cased so that we can eliminate redundant ordering
1065          * operations while building the request (we know that the timeline
1066          * itself is ordered, and here we guarantee it).
1067          *
1068          * As we know we will need to emit tracking along the timeline,
1069          * we embed the hooks into our request struct -- at the cost of
1070          * having to have specialised no-allocation interfaces (which will
1071          * be beneficial elsewhere).
1072          *
1073          * A second benefit to open-coding i915_request_await_request is
1074          * that we can apply a slight variant of the rules specialised
1075          * for timelines that jump between engines (such as virtual engines).
1076          * If we consider the case of a virtual engine, we must emit a dma-fence
1077          * to prevent scheduling of the second request until the first is
1078          * complete (to maximise our greedy late load balancing) and this
1079          * precludes optimising to use semaphore serialisation of a single
1080          * timeline across engines.
1081          */
1082         prev = rcu_dereference_protected(timeline->last_request.request, 1);
1083         if (prev && !i915_request_completed(prev)) {
1084                 if (is_power_of_2(prev->engine->mask | rq->engine->mask))
1085                         i915_sw_fence_await_sw_fence(&rq->submit,
1086                                                      &prev->submit,
1087                                                      &rq->submitq);
1088                 else
1089                         __i915_sw_fence_await_dma_fence(&rq->submit,
1090                                                         &prev->fence,
1091                                                         &rq->dmaq);
1092                 if (rq->engine->schedule)
1093                         __i915_sched_node_add_dependency(&rq->sched,
1094                                                          &prev->sched,
1095                                                          &rq->dep,
1096                                                          0);
1097         }
1098
1099         spin_lock_irq(&timeline->lock);
1100         list_add_tail(&rq->link, &timeline->requests);
1101         spin_unlock_irq(&timeline->lock);
1102
1103         /*
1104          * Make sure that no request gazumped us - if it was allocated after
1105          * our i915_request_alloc() and called __i915_request_add() before
1106          * us, the timeline will hold its seqno which is later than ours.
1107          */
1108         GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1109         __i915_active_request_set(&timeline->last_request, rq);
1110
1111         return prev;
1112 }
1113
1114 /*
1115  * NB: This function is not allowed to fail. Doing so would mean that the
1116  * request is not being tracked for completion but the work itself is
1117  * going to happen on the hardware. This would be a Bad Thing(tm).
1118  */
1119 struct i915_request *__i915_request_commit(struct i915_request *rq)
1120 {
1121         struct intel_engine_cs *engine = rq->engine;
1122         struct intel_ring *ring = rq->ring;
1123         struct i915_request *prev;
1124         u32 *cs;
1125
1126         GEM_TRACE("%s fence %llx:%lld\n",
1127                   engine->name, rq->fence.context, rq->fence.seqno);
1128
1129         /*
1130          * To ensure that this call will not fail, space for its emissions
1131          * should already have been reserved in the ring buffer. Let the ring
1132          * know that it is time to use that space up.
1133          */
1134         GEM_BUG_ON(rq->reserved_space > ring->space);
1135         rq->reserved_space = 0;
1136
1137         /*
1138          * Record the position of the start of the breadcrumb so that
1139          * should we detect the updated seqno part-way through the
1140          * GPU processing the request, we never over-estimate the
1141          * position of the ring's HEAD.
1142          */
1143         cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1144         GEM_BUG_ON(IS_ERR(cs));
1145         rq->postfix = intel_ring_offset(rq, cs);
1146
1147         prev = __i915_request_add_to_timeline(rq);
1148
1149         list_add_tail(&rq->ring_link, &ring->request_list);
1150         if (list_is_first(&rq->ring_link, &ring->request_list))
1151                 list_add(&ring->active_link, &rq->i915->gt.active_rings);
1152         rq->emitted_jiffies = jiffies;
1153
1154         /*
1155          * Let the backend know a new request has arrived that may need
1156          * to adjust the existing execution schedule due to a high priority
1157          * request - i.e. we may want to preempt the current request in order
1158          * to run a high priority dependency chain *before* we can execute this
1159          * request.
1160          *
1161          * This is called before the request is ready to run so that we can
1162          * decide whether to preempt the entire chain so that it is ready to
1163          * run at the earliest possible convenience.
1164          */
1165         local_bh_disable();
1166         i915_sw_fence_commit(&rq->semaphore);
1167         rcu_read_lock(); /* RCU serialisation for set-wedged protection */
1168         if (engine->schedule) {
1169                 struct i915_sched_attr attr = rq->gem_context->sched;
1170
1171                 /*
1172                  * Boost actual workloads past semaphores!
1173                  *
1174                  * With semaphores we spin on one engine waiting for another,
1175                  * simply to reduce the latency of starting our work when
1176                  * the signaler completes. However, if there is any other
1177                  * work that we could be doing on this engine instead, that
1178                  * is better utilisation and will reduce the overall duration
1179                  * of the current work. To avoid PI boosting a semaphore
1180                  * far in the distance past over useful work, we keep a history
1181                  * of any semaphore use along our dependency chain.
1182                  */
1183                 if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
1184                         attr.priority |= I915_PRIORITY_NOSEMAPHORE;
1185
1186                 /*
1187                  * Boost priorities to new clients (new request flows).
1188                  *
1189                  * Allow interactive/synchronous clients to jump ahead of
1190                  * the bulk clients. (FQ_CODEL)
1191                  */
1192                 if (list_empty(&rq->sched.signalers_list))
1193                         attr.priority |= I915_PRIORITY_WAIT;
1194
1195                 engine->schedule(rq, &attr);
1196         }
1197         rcu_read_unlock();
1198         i915_sw_fence_commit(&rq->submit);
1199         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1200
1201         return prev;
1202 }
1203
1204 void i915_request_add(struct i915_request *rq)
1205 {
1206         struct i915_request *prev;
1207
1208         lockdep_assert_held(&rq->timeline->mutex);
1209         lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
1210
1211         trace_i915_request_add(rq);
1212
1213         prev = __i915_request_commit(rq);
1214
1215         /*
1216          * In typical scenarios, we do not expect the previous request on
1217          * the timeline to be still tracked by timeline->last_request if it
1218          * has been completed. If the completed request is still here, that
1219          * implies that request retirement is a long way behind submission,
1220          * suggesting that we haven't been retiring frequently enough from
1221          * the combination of retire-before-alloc, waiters and the background
1222          * retirement worker. So if the last request on this timeline was
1223          * already completed, do a catch up pass, flushing the retirement queue
1224          * up to this client. Since we have now moved the heaviest operations
1225          * during retirement onto secondary workers, such as freeing objects
1226          * or contexts, retiring a bunch of requests is mostly list management
1227          * (and cache misses), and so we should not be overly penalizing this
1228          * client by performing excess work, though we may still be performing
1229          * work on behalf of others -- but instead we should benefit from
1230          * improved resource management. (Well, that's the theory at least.)
1231          */
1232         if (prev && i915_request_completed(prev))
1233                 i915_request_retire_upto(prev);
1234
1235         mutex_unlock(&rq->timeline->mutex);
1236 }
1237
1238 static unsigned long local_clock_us(unsigned int *cpu)
1239 {
1240         unsigned long t;
1241
1242         /*
1243          * Cheaply and approximately convert from nanoseconds to microseconds.
1244          * The result and subsequent calculations are also defined in the same
1245          * approximate microseconds units. The principal source of timing
1246          * error here is from the simple truncation.
1247          *
1248          * Note that local_clock() is only defined with respect to the current CPU;
1249          * the comparisons are no longer valid if we switch CPUs. Instead of
1250          * blocking preemption for the entire busywait, we can detect the CPU
1251          * switch and use that as indicator of system load and a reason to
1252          * stop busywaiting, see busywait_stop().
1253          */
1254         *cpu = get_cpu();
1255         t = local_clock() >> 10;
1256         put_cpu();
1257
1258         return t;
1259 }
1260
1261 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1262 {
1263         unsigned int this_cpu;
1264
1265         if (time_after(local_clock_us(&this_cpu), timeout))
1266                 return true;
1267
1268         return this_cpu != cpu;
1269 }
1270
1271 static bool __i915_spin_request(const struct i915_request * const rq,
1272                                 int state, unsigned long timeout_us)
1273 {
1274         unsigned int cpu;
1275
1276         /*
1277          * Only wait for the request if we know it is likely to complete.
1278          *
1279          * We don't track the timestamps around requests, nor the average
1280          * request length, so we do not have a good indicator that this
1281          * request will complete within the timeout. What we do know is the
1282          * order in which requests are executed by the context and so we can
1283          * tell if the request has been started. If the request is not even
1284          * running yet, it is a fair assumption that it will not complete
1285          * within our relatively short timeout.
1286          */
1287         if (!i915_request_is_running(rq))
1288                 return false;
1289
1290         /*
1291          * When waiting for high frequency requests, e.g. during synchronous
1292          * rendering split between the CPU and GPU, the finite amount of time
1293          * required to set up the irq and wait upon it limits the response
1294          * rate. By busywaiting on the request completion for a short while we
1295          * can service the high frequency waits as quickly as possible. However,
1296          * if it is a slow request, we want to sleep as quickly as possible.
1297          * The tradeoff between waiting and sleeping is roughly the time it
1298          * takes to sleep on a request, on the order of a microsecond.
1299          */
1300
1301         timeout_us += local_clock_us(&cpu);
1302         do {
1303                 if (i915_request_completed(rq))
1304                         return true;
1305
1306                 if (signal_pending_state(state, current))
1307                         break;
1308
1309                 if (busywait_stop(timeout_us, cpu))
1310                         break;
1311
1312                 cpu_relax();
1313         } while (!need_resched());
1314
1315         return false;
1316 }
1317
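/* On-stack wait node coupling a dma_fence callback to the sleeping task. */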
1318 struct request_wait {
1319         struct dma_fence_cb cb;
1320         struct task_struct *tsk;
1321 };
1322
1323 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1324 {
1325         struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1326
1327         wake_up_process(wait->tsk);
1328 }
1329
1330 /**
1331  * i915_request_wait - wait until execution of request has finished
1332  * @rq: the request to wait upon
1333  * @flags: how to wait
1334  * @timeout: how long to wait in jiffies
1335  *
1336  * i915_request_wait() waits for the request to be completed, for a
1337  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1338  * unbounded wait).
1339  *
1340  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1341  * via the flags; conversely, if the struct_mutex is not held, the caller
1342  * must not specify that the wait is locked.
1343  *
1344  * Returns the remaining time (in jiffies) if the request completed, which may
1345  * be zero, or -ETIME if the request is unfinished after the timeout expires.
1346  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1347  * pending before the request completes.
1348  */
1349 long i915_request_wait(struct i915_request *rq,
1350                        unsigned int flags,
1351                        long timeout)
1352 {
1353         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1354                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1355         struct request_wait wait;
1356
1357         might_sleep();
1358         GEM_BUG_ON(timeout < 0);
1359
1360         if (i915_request_completed(rq))
1361                 return timeout;
1362
1363         if (!timeout)
1364                 return -ETIME;
1365
1366         trace_i915_request_wait_begin(rq, flags);
1367
1368         /*
1369          * Optimistic spin before touching IRQs.
1370          *
1371          * We may use a rather large value here to offset the penalty of
1372          * switching away from the active task. Frequently, the client will
1373          * wait upon an old swapbuffer to throttle itself to remain within a
1374          * frame of the gpu. If the client is running in lockstep with the gpu,
1375          * then it should not be waiting long at all, and a sleep now will incur
1376          * extra scheduler latency in producing the next frame. To try to
1377          * avoid adding the cost of enabling/disabling the interrupt to the
1378          * short wait, we first spin to see if the request would have completed
1379          * in the time taken to set up the interrupt.
1380          *
1381          * We need up to 5us to enable the irq, and up to 20us to hide the
1382          * scheduler latency of a context switch, ignoring the secondary
1383          * impacts from a context switch such as cache eviction.
1384          *
1385          * The scheme used for low-latency IO is called "hybrid interrupt
1386          * polling". The suggestion there is to sleep until just before you
1387          * expect to be woken by the device interrupt and then poll for its
1388          * completion. That requires having a good predictor for the request
1389          * duration, which we currently lack.
1390          */
1391         if (CONFIG_DRM_I915_SPIN_REQUEST &&
1392             __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST))
1393                 goto out;
1394
1395         /*
1396          * This client is about to stall waiting for the GPU. In many cases
1397          * this is undesirable and limits the throughput of the system, as
1398          * many clients cannot continue processing user input/output whilst
1399          * blocked. RPS autotuning may take tens of milliseconds to respond
1400          * to the GPU load and thus incurs additional latency for the client.
1401          * We can circumvent that by promoting the GPU frequency to maximum
1402          * before we sleep. This makes the GPU throttle up much more quickly
1403          * (good for benchmarks and user experience, e.g. window animations),
1404          * but at a cost of spending more power processing the workload
1405          * (bad for battery).
1406          */
1407         if (flags & I915_WAIT_PRIORITY) {
1408                 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1409                         gen6_rps_boost(rq);
1410                 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1411         }
1412
1413         wait.tsk = current;
1414         if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1415                 goto out;
1416
1417         for (;;) {
1418                 set_current_state(state);
1419
1420                 if (i915_request_completed(rq))
1421                         break;
1422
1423                 if (signal_pending_state(state, current)) {
1424                         timeout = -ERESTARTSYS;
1425                         break;
1426                 }
1427
1428                 if (!timeout) {
1429                         timeout = -ETIME;
1430                         break;
1431                 }
1432
1433                 timeout = io_schedule_timeout(timeout);
1434         }
1435         __set_current_state(TASK_RUNNING);
1436
1437         dma_fence_remove_callback(&rq->fence, &wait.cb);
1438
1439 out:
1440         trace_i915_request_wait_end(rq);
1441         return timeout;
1442 }
1443
1444 bool i915_retire_requests(struct drm_i915_private *i915)
1445 {
1446         struct intel_ring *ring, *tmp;
1447
1448         lockdep_assert_held(&i915->drm.struct_mutex);
1449
1450         list_for_each_entry_safe(ring, tmp,
1451                                  &i915->gt.active_rings, active_link) {
1452                 intel_ring_get(ring); /* last rq holds reference! */
1453                 ring_retire_requests(ring);
1454                 intel_ring_put(ring);
1455         }
1456
1457         return !list_empty(&i915->gt.active_rings);
1458 }
1459
1460 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1461 #include "selftests/mock_request.c"
1462 #include "selftests/i915_request.c"
1463 #endif
1464
1465 static void i915_global_request_shrink(void)
1466 {
1467         kmem_cache_shrink(global.slab_dependencies);
1468         kmem_cache_shrink(global.slab_execute_cbs);
1469         kmem_cache_shrink(global.slab_requests);
1470 }
1471
1472 static void i915_global_request_exit(void)
1473 {
1474         kmem_cache_destroy(global.slab_dependencies);
1475         kmem_cache_destroy(global.slab_execute_cbs);
1476         kmem_cache_destroy(global.slab_requests);
1477 }
1478
1479 static struct i915_global_request global = { {
1480         .shrink = i915_global_request_shrink,
1481         .exit = i915_global_request_exit,
1482 } };
1483
1484 int __init i915_global_request_init(void)
1485 {
1486         global.slab_requests = KMEM_CACHE(i915_request,
1487                                           SLAB_HWCACHE_ALIGN |
1488                                           SLAB_RECLAIM_ACCOUNT |
1489                                           SLAB_TYPESAFE_BY_RCU);
1490         if (!global.slab_requests)
1491                 return -ENOMEM;
1492
1493         global.slab_execute_cbs = KMEM_CACHE(execute_cb,
1494                                              SLAB_HWCACHE_ALIGN |
1495                                              SLAB_RECLAIM_ACCOUNT |
1496                                              SLAB_TYPESAFE_BY_RCU);
1497         if (!global.slab_execute_cbs)
1498                 goto err_requests;
1499
1500         global.slab_dependencies = KMEM_CACHE(i915_dependency,
1501                                               SLAB_HWCACHE_ALIGN |
1502                                               SLAB_RECLAIM_ACCOUNT);
1503         if (!global.slab_dependencies)
1504                 goto err_execute_cbs;
1505
1506         i915_global_register(&global.base);
1507         return 0;
1508
1509 err_execute_cbs:
1510         kmem_cache_destroy(global.slab_execute_cbs);
1511 err_requests:
1512         kmem_cache_destroy(global.slab_requests);
1513         return -ENOMEM;
1514 }