/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"

15 static int __engine_unpark(struct intel_wakeref *wf)
16 {
17         struct intel_engine_cs *engine =
18                 container_of(wf, typeof(*engine), wakeref);
19         void *map;
20
21         GEM_TRACE("%s\n", engine->name);
22
23         intel_gt_pm_get(engine->gt);
24
25         /* Pin the default state for fast resets from atomic context. */
26         map = NULL;
27         if (engine->default_state)
28                 map = i915_gem_object_pin_map(engine->default_state,
29                                               I915_MAP_WB);
30         if (!IS_ERR_OR_NULL(map))
31                 engine->pinned_default_state = map;
32
33         if (engine->unpark)
34                 engine->unpark(engine);
35
36         intel_engine_init_hangcheck(engine);
37         return 0;
38 }
39
40 static bool switch_to_kernel_context(struct intel_engine_cs *engine)
41 {
42         struct i915_request *rq;
43
44         /* Already inside the kernel context, safe to power down. */
45         if (engine->wakeref_serial == engine->serial)
46                 return true;
47
48         /* GPU is pointing to the void, as good as in the kernel context. */
49         if (intel_gt_is_wedged(engine->gt))
50                 return true;
51
52         /*
53          * Note, we do this without taking the timeline->mutex. We cannot
54          * as we may be called while retiring the kernel context and so
55          * already underneath the timeline->mutex. Instead we rely on the
56          * exclusive property of the __engine_park that prevents anyone
57          * else from creating a request on this engine. This also requires
58          * that the ring is empty and we avoid any waits while constructing
59          * the context, as they assume protection by the timeline->mutex.
60          * This should hold true as we can only park the engine after
61          * retiring the last request, thus all rings should be empty and
62          * all timelines idle.
63          */
64         rq = __i915_request_create(engine->kernel_context, GFP_NOWAIT);
65         if (IS_ERR(rq))
66                 /* Context switch failed, hope for the best! Maybe reset? */
67                 return true;
68
69         /* Check again on the next retirement. */
70         engine->wakeref_serial = engine->serial + 1;
71
72         i915_request_add_active_barriers(rq);
73         __i915_request_commit(rq);
74
75         return false;
76 }
77
78 static int __engine_park(struct intel_wakeref *wf)
79 {
80         struct intel_engine_cs *engine =
81                 container_of(wf, typeof(*engine), wakeref);
82
83         engine->saturated = 0;
84
85         /*
86          * If one and only one request is completed between pm events,
87          * we know that we are inside the kernel context and it is
88          * safe to power down. (We are paranoid in case that runtime
89          * suspend causes corruption to the active context image, and
90          * want to avoid that impacting userspace.)
91          */
92         if (!switch_to_kernel_context(engine))
93                 return -EBUSY;
94
95         GEM_TRACE("%s\n", engine->name);
96
97         intel_engine_disarm_breadcrumbs(engine);
98         intel_engine_pool_park(&engine->pool);
99
100         /* Must be reset upon idling, or we may miss the busy wakeup. */
101         GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
102
103         if (engine->park)
104                 engine->park(engine);
105
106         if (engine->pinned_default_state) {
107                 i915_gem_object_unpin_map(engine->default_state);
108                 engine->pinned_default_state = NULL;
109         }
110
111         engine->execlists.no_priolist = false;
112
113         intel_gt_pm_put(engine->gt);
114         return 0;
115 }
116
117 static const struct intel_wakeref_ops wf_ops = {
118         .get = __engine_unpark,
119         .put = __engine_park,
120 };
121
122 void intel_engine_init__pm(struct intel_engine_cs *engine)
123 {
124         struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
125
126         intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
127 }
128
129 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
130 #include "selftest_engine_pm.c"
131 #endif