/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_ringbuffer.h"
13 static struct i915_global_context {
14 struct i915_global base;
15 struct kmem_cache *slab_ce;
18 struct intel_context *intel_context_alloc(void)
20 return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
23 void intel_context_free(struct intel_context *ce)
25 kmem_cache_free(global.slab_ce, ce);
28 struct intel_context *
29 intel_context_lookup(struct i915_gem_context *ctx,
30 struct intel_engine_cs *engine)
32 struct intel_context *ce = NULL;
35 spin_lock(&ctx->hw_contexts_lock);
36 p = ctx->hw_contexts.rb_node;
38 struct intel_context *this =
39 rb_entry(p, struct intel_context, node);
41 if (this->engine == engine) {
42 GEM_BUG_ON(this->gem_context != ctx);
47 if (this->engine < engine)
52 spin_unlock(&ctx->hw_contexts_lock);
57 struct intel_context *
58 __intel_context_insert(struct i915_gem_context *ctx,
59 struct intel_engine_cs *engine,
60 struct intel_context *ce)
62 struct rb_node **p, *parent;
65 spin_lock(&ctx->hw_contexts_lock);
68 p = &ctx->hw_contexts.rb_node;
70 struct intel_context *this;
73 this = rb_entry(parent, struct intel_context, node);
75 if (this->engine == engine) {
81 if (this->engine < engine)
82 p = &parent->rb_right;
87 rb_link_node(&ce->node, parent, p);
88 rb_insert_color(&ce->node, &ctx->hw_contexts);
91 spin_unlock(&ctx->hw_contexts_lock);
96 void __intel_context_remove(struct intel_context *ce)
98 struct i915_gem_context *ctx = ce->gem_context;
100 spin_lock(&ctx->hw_contexts_lock);
101 rb_erase(&ce->node, &ctx->hw_contexts);
102 spin_unlock(&ctx->hw_contexts_lock);
105 static struct intel_context *
106 intel_context_instance(struct i915_gem_context *ctx,
107 struct intel_engine_cs *engine)
109 struct intel_context *ce, *pos;
111 ce = intel_context_lookup(ctx, engine);
115 ce = intel_context_alloc();
117 return ERR_PTR(-ENOMEM);
119 intel_context_init(ce, ctx, engine);
121 pos = __intel_context_insert(ctx, engine, ce);
122 if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
123 intel_context_free(ce);
125 GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
129 struct intel_context *
130 intel_context_pin_lock(struct i915_gem_context *ctx,
131 struct intel_engine_cs *engine)
132 __acquires(ce->pin_mutex)
134 struct intel_context *ce;
136 ce = intel_context_instance(ctx, engine);
140 if (mutex_lock_interruptible(&ce->pin_mutex))
141 return ERR_PTR(-EINTR);
146 struct intel_context *
147 intel_context_pin(struct i915_gem_context *ctx,
148 struct intel_engine_cs *engine)
150 struct intel_context *ce;
153 ce = intel_context_instance(ctx, engine);
157 if (likely(atomic_inc_not_zero(&ce->pin_count)))
160 if (mutex_lock_interruptible(&ce->pin_mutex))
161 return ERR_PTR(-EINTR);
163 if (likely(!atomic_read(&ce->pin_count))) {
164 err = ce->ops->pin(ce);
168 i915_gem_context_get(ctx);
169 GEM_BUG_ON(ce->gem_context != ctx);
171 mutex_lock(&ctx->mutex);
172 list_add(&ce->active_link, &ctx->active_engines);
173 mutex_unlock(&ctx->mutex);
175 intel_context_get(ce);
176 smp_mb__before_atomic(); /* flush pin before it is visible */
179 atomic_inc(&ce->pin_count);
180 GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
182 mutex_unlock(&ce->pin_mutex);
186 mutex_unlock(&ce->pin_mutex);
190 void intel_context_unpin(struct intel_context *ce)
192 if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
195 /* We may be called from inside intel_context_pin() to evict another */
196 intel_context_get(ce);
197 mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
199 if (likely(atomic_dec_and_test(&ce->pin_count))) {
202 mutex_lock(&ce->gem_context->mutex);
203 list_del(&ce->active_link);
204 mutex_unlock(&ce->gem_context->mutex);
206 i915_gem_context_put(ce->gem_context);
207 intel_context_put(ce);
210 mutex_unlock(&ce->pin_mutex);
211 intel_context_put(ce);
214 static void intel_context_retire(struct i915_active_request *active,
215 struct i915_request *rq)
217 struct intel_context *ce =
218 container_of(active, typeof(*ce), active_tracker);
220 intel_context_unpin(ce);
224 intel_context_init(struct intel_context *ce,
225 struct i915_gem_context *ctx,
226 struct intel_engine_cs *engine)
230 ce->gem_context = ctx;
232 ce->ops = engine->cops;
235 INIT_LIST_HEAD(&ce->signal_link);
236 INIT_LIST_HEAD(&ce->signals);
238 mutex_init(&ce->pin_mutex);
240 /* Use the whole device by default */
241 ce->sseu = intel_device_default_sseu(ctx->i915);
243 i915_active_request_init(&ce->active_tracker,
244 NULL, intel_context_retire);
247 static void i915_global_context_shrink(void)
249 kmem_cache_shrink(global.slab_ce);
252 static void i915_global_context_exit(void)
254 kmem_cache_destroy(global.slab_ce);
257 static struct i915_global_context global = { {
258 .shrink = i915_global_context_shrink,
259 .exit = i915_global_context_exit,
262 int __init i915_global_context_init(void)
264 global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
268 i915_global_register(&global.base);