drivers/gpu/drm/i915/intel_wakeref.c (linux.git, blob d4443e81c1c8957910c385764284052bc62f4e44)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

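/* Acquire a runtime PM wakeref on behalf of this intel_wakeref. */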
static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

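/*
 * Release the runtime PM wakeref previously taken by rpm_get(), complaining
 * if none was actually held.
 */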
static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

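/*
 * Slow path for taking the first reference: with the mutex held, acquire
 * runtime PM and invoke the ops->get() callback before publishing the new
 * count. Returns 0 on success, or the error from ops->get() after undoing
 * the runtime PM acquisition.
 */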
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

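/*
 * Drop the final reference with wf->mutex already held. If ops->put()
 * completes the release, the runtime PM wakeref is returned and any waiters
 * in intel_wakeref_wait_for_idle() are woken; if ops->put() defers, the
 * count is restored to 1 and the callback must arrange its own release.
 */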
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	if (!atomic_dec_and_test(&wf->count))
		goto unlock;

	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	} else {
		/* ops->put() must schedule its own release on deferral */
		atomic_set_release(&wf->count, 1);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

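/*
 * Final-reference path that may be called from atomic context: if the put
 * must run asynchronously, or the mutex cannot be taken without blocking,
 * defer the release to the worker; otherwise drop the reference inline.
 */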
void __intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
	    !mutex_trylock(&wf->mutex)) {
		schedule_work(&wf->work);
		return;
	}

	____intel_wakeref_put_last(wf);
}

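/*
 * Deferred release run from the workqueue. If more references were taken in
 * the meantime, just drop ours (atomic_add_unless() refuses to decrement
 * from 1); otherwise take the mutex and release the last reference for real.
 */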
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

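/*
 * Initialise the wakeref: no references held and no runtime PM wakeref
 * owned, with the caller-supplied lock class key used for lockdep tracking
 * of wf->mutex.
 */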
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_WORK(&wf->work, __intel_wakeref_put_work);
}

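/*
 * Sleep (killably) until the wakeref is released, i.e. until
 * intel_wakeref_is_active() reports false. Returns 0 on success or a
 * negative error code if interrupted by a fatal signal.
 */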
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	return wait_var_event_killable(&wf->wakeref,
				       !intel_wakeref_is_active(wf));
}

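/*
 * Timer callback for the auto wakeref: drop the timer's reference and, if
 * it was the last one, return the runtime PM wakeref taken on its behalf.
 */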
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

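/* Initialise the timer-based auto wakeref in the idle (unreferenced) state. */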
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

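/*
 * Arm (or rearm) the auto wakeref for @timeout jiffies. A timeout of zero
 * cancels the timer and releases any wakeref it was holding. Otherwise the
 * caller must already hold a runtime PM wakelock; the first user grabs a
 * runtime PM reference via intel_runtime_pm_get_if_in_use() to keep the
 * device awake until the timer expires.
 */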
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is that we only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

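/*
 * Tear down the auto wakeref, flushing any pending timeout and verifying
 * that no runtime PM wakeref is left behind.
 */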
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}