/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}
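
/*
 * A note on fetch_and_zero(): it is assumed here to be the i915 helper
 * macro (i915_utils.h) that reads the old value and leaves zero behind,
 * roughly:
 *
 *	typeof(*ptr) __T = *(ptr);
 *	*(ptr) = (typeof(*ptr))0;
 *
 * evaluating to __T, so rpm_put() consumes wf->wakeref exactly once.
 */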

int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);
		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}
		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}
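
/*
 * Usage sketch (not part of this file): callers are assumed to take the
 * reference through the inline fast path in intel_wakeref.h, which only
 * drops into __intel_wakeref_get_first() for the 0 -> 1 transition,
 * roughly:
 *
 *	if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *		return __intel_wakeref_get_first(wf);
 *	return 0;
 *
 * so the mutex and ops->get() are only paid when the unit is idle.
 */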

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	if (!atomic_dec_and_test(&wf->count))
		goto unlock;
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	} else {
		/* ops->put() must schedule its own release on deferral */
		atomic_set_release(&wf->count, 1);
	}
unlock:
	mutex_unlock(&wf->mutex);
}
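
/*
 * Sketch of the ops->put() contract with a hypothetical backend (the
 * foo_* names are invented for illustration): returning 0 means the
 * release completed above; any nonzero return means the backend has
 * scheduled its own deferred release, and the count is parked at 1
 * until that deferred work puts the wakeref again:
 *
 *	static int foo_wakeref_put(struct intel_wakeref *wf)
 *	{
 *		if (foo_can_park_now(wf))
 *			return 0;
 *		foo_queue_deferred_park(wf);
 *		return -EAGAIN;
 *	}
 */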

void __intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
	    !mutex_trylock(&wf->mutex)) {
		schedule_work(&wf->work);
		return;
	}

	____intel_wakeref_put_last(wf);
}
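
/*
 * Put-side sketch (assumed to mirror the inline helper in
 * intel_wakeref.h): the fast path decrements wf->count unless this is
 * the final reference, so only the 1 -> 0 transition lands in
 * __intel_wakeref_put_last(), roughly:
 *
 *	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *		__intel_wakeref_put_last(wf);
 */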

static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_WORK(&wf->work, __intel_wakeref_put_work);
}
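
/*
 * A minimal sketch of how a wakeref is typically instantiated, assuming
 * a wrapper macro in intel_wakeref.h that supplies a unique lock class
 * per call site for lockdep:
 *
 *	#define intel_wakeref_init(wf, rpm, ops) do {			\
 *		static struct lock_class_key __key;			\
 *		__intel_wakeref_init((wf), (rpm), (ops), &__key);	\
 *	} while (0)
 */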

int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	return wait_var_event_killable(&wf->wakeref,
				       !intel_wakeref_is_active(wf));
}
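
/*
 * Note the pairing: wake_up_var(&wf->wakeref) in
 * ____intel_wakeref_put_last() is what completes this wait. A caller
 * flushing a unit before suspend might do (sketch; the error is
 * -ERESTARTSYS if interrupted by a fatal signal):
 *
 *	err = intel_wakeref_wait_for_idle(wf);
 *	if (err)
 *		return err;
 */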

static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* Our mission is that we only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}
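
/*
 * Usage sketch for the auto wakeref (the timeout value is invented for
 * illustration): a fault handler can hold the device awake for a grace
 * period after the last access instead of bouncing runtime pm on every
 * fault:
 *
 *	intel_wakeref_auto(wf, msecs_to_jiffies(250));
 *
 * while intel_wakeref_auto(wf, 0) cancels the timer and drops the
 * wakeref immediately, as intel_wakeref_auto_fini() does below.
 */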

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}