drm/i915/gt: Restore dropped 'interruptible' flag
[linux.git] drivers/gpu/drm/i915/gt/intel_gt_requests.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_request.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

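/*
 * Retire completed requests on the timeline in submission order, stopping
 * at the first request that cannot yet be retired.
 */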
static void retire_requests(struct intel_timeline *tl)
{
        struct i915_request *rq, *rn;

        list_for_each_entry_safe(rq, rn, &tl->requests, link)
                if (!i915_request_retire(rq))
                        break;
}

long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
        unsigned long active_count = 0;
        unsigned long flags;
        bool interruptible;
        LIST_HEAD(free);

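        /*
         * A negative timeout encodes an uninterruptible wait of the
         * corresponding magnitude; a positive timeout may be interrupted
         * by a signal.
         */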
        interruptible = true;
        if (unlikely(timeout < 0))
                timeout = -timeout, interruptible = false;

        spin_lock_irqsave(&timelines->lock, flags);
        list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
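                /*
                 * The timelines->lock spinlock is held, so we must not
                 * sleep; skip any timeline whose mutex cannot be taken
                 * without blocking.
                 */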
                if (!mutex_trylock(&tl->mutex))
                        continue;

                intel_timeline_get(tl);
                GEM_BUG_ON(!tl->active_count);
                tl->active_count++; /* pin the list element */
                spin_unlock_irqrestore(&timelines->lock, flags);

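                /*
                 * If a wait was requested, block on the timeline's most
                 * recent request before retiring.
                 */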
                if (timeout > 0) {
                        struct dma_fence *fence;

                        fence = i915_active_fence_get(&tl->last_request);
                        if (fence) {
                                timeout = dma_fence_wait_timeout(fence,
                                                                 interruptible,
                                                                 timeout);
                                dma_fence_put(fence);
                        }
                }

                retire_requests(tl);

                spin_lock_irqsave(&timelines->lock, flags);

                /* Resume iteration after dropping lock */
                list_safe_reset_next(tl, tn, link);
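                /*
                 * Unpin the list element: an idle timeline is removed from
                 * the active list, otherwise it is counted as busy if a
                 * request is still outstanding.
                 */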
                if (--tl->active_count)
                        active_count += !!rcu_access_pointer(tl->last_request.fence);
                else
                        list_del(&tl->link);

                mutex_unlock(&tl->mutex);

                /* Defer the final release to after the spinlock */
                if (refcount_dec_and_test(&tl->kref.refcount)) {
                        GEM_BUG_ON(tl->active_count);
                        list_add(&tl->link, &free);
                }
        }
        spin_unlock_irqrestore(&timelines->lock, flags);

        list_for_each_entry_safe(tl, tn, &free, link)
                __intel_timeline_free(&tl->kref);

        return active_count ? timeout : 0;
}

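/*
 * Wait for outstanding requests on the GT to be retired, retrying until the
 * GT is idle, the timeout expires, or a signal is pending.
 */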
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
        /* If the device is asleep, we have no requests outstanding */
        if (!intel_gt_pm_is_awake(gt))
                return 0;

        while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
                cond_resched();
                if (signal_pending(current))
                        return -EINTR;
        }

        return timeout;
}

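/*
 * Background worker: retire completed requests roughly once a second while
 * the GT remains unparked.
 */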
static void retire_work_handler(struct work_struct *work)
{
        struct intel_gt *gt =
                container_of(work, typeof(*gt), requests.retire_work.work);

        intel_gt_retire_requests(gt);
        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
}

void intel_gt_init_requests(struct intel_gt *gt)
{
        INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

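/* Stop the background retire worker while the GT is parked. */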
void intel_gt_park_requests(struct intel_gt *gt)
{
        cancel_delayed_work(&gt->requests.retire_work);
}

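/* Resume periodic retirement when the GT is unparked. */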
void intel_gt_unpark_requests(struct intel_gt *gt)
{
        schedule_delayed_work(&gt->requests.retire_work,
                              round_jiffies_up_relative(HZ));
}