/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
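
/*
 * intel_context objects come from a dedicated slab cache, registered with
 * i915_globals via the shrink/exit hooks at the bottom of this file.
 */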
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}
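
/*
 * Lazily allocate the backing state (context image, etc.) through the
 * engine backend. CONTEXT_ALLOC_BIT ensures this happens exactly once,
 * serialised against concurrent pinners by pin_mutex.
 */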
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
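
/*
 * intel_context_active_acquire() takes the first reference on ce->active
 * and, for non-barrier contexts, preallocates the barrier nodes that
 * intel_context_active_release() later flushes via
 * i915_active_acquire_barrier().
 */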
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
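
/*
 * Pinning proceeds in three stages: make sure the backing state is
 * allocated, acquire the i915_active tracker, then under pin_mutex do the
 * backend pin (ce->ops->pin) on the first pin only; later pins merely
 * increment pin_count.
 */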
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	if (mutex_lock_interruptible(&ce->pin_mutex)) {
		err = -EINTR;
		goto out_release;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto out_unlock;

		err = ce->ops->pin(ce);
		if (unlikely(err))
			goto err_active;

		CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
			 ce->ring->head, ce->ring->tail);

		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	goto out_unlock;

err_active:
	intel_context_active_release(ce);
out_unlock:
	mutex_unlock(&ce->pin_mutex);
out_release:
	i915_active_release(&ce->active);
	return err;
}
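
/*
 * Unpinning is the inverse: only the final unpin (pin_count dropping to
 * zero) calls into the backend and releases the active tracking acquired
 * in __intel_context_do_pin().
 */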
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra now so that it is not freed before we finish
	 * dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}
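
/*
 * The context state object must reside in the GGTT while in use: pin it
 * above the engine's required bias, keep its i915_active alive, and mark
 * it unshrinkable so the shrinker does not evict it from underneath us.
 */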
static int __context_pin_state(struct i915_vma *vma)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}
static int __ring_active(struct intel_ring *ring)
{
	int err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		return err;

	err = intel_ring_pin(ring);
	if (err)
		goto err_active;

	return 0;

err_active:
	i915_active_release(&ring->vma->active);
	return err;
}

static void __ring_retire(struct intel_ring *ring)
{
	intel_ring_unpin(ring);
	i915_active_release(&ring->vma->active);
}
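
/*
 * __intel_context_active() and __intel_context_retire() are the callbacks
 * wired up in i915_active_init() below: the former pins the ring, timeline
 * and (if present) context state when the context first becomes active,
 * and the latter unwinds those pins once the last request is retired.
 */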
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire\n");

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);

	intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	CE_TRACE(ce, "active\n");

	intel_context_get(ce);

	err = __ring_active(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}
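
/*
 * i915_globals hooks for the intel_context slab: shrink trims the cache,
 * exit destroys it. Both are invoked through the global table registered
 * in i915_global_context_init().
 */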
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce
	 * activity tracker.
	 *
	 * But we only need to take one pin on its account; in other words,
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}
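
/*
 * Illustrative sketch (not part of the driver): a typical caller builds
 * and submits a request like so, relying on the request itself to keep
 * the context pinned once intel_context_create_request() returns:
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... emit commands into rq ...
 *	i915_request_add(rq);
 */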
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif