asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drm/i915: Move GEM object domain management from struct_mutex to local
[linux.git] / drivers / gpu / drm / i915 / gem / selftests / i915_gem_context.c
index 653ae08a277facdf84a95bed42029d17be464664..72eedd6c2a0a656bb0ed0de9454c716a701882de 100644 (file)
@@ -209,7 +209,9 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
+       i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_gtt_domain(obj, false);
+       i915_gem_object_unlock(obj);
        if (err)
                goto err;
 
@@ -261,7 +263,9 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
+       i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_gtt_domain(obj, false);
+       i915_gem_object_unlock(obj);
        if (err)
                return err;
 
@@ -302,11 +306,15 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        if (err)
                goto err_request;
 
+       i915_vma_lock(batch);
        err = i915_vma_move_to_active(batch, rq, 0);
+       i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
+       i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       i915_vma_unlock(vma);
        if (err)
                goto skip_request;
 
@@ -754,7 +762,9 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
+       i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_gtt_domain(obj, false);
+       i915_gem_object_unlock(obj);
        if (err)
                return err;
 
@@ -780,11 +790,15 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
        if (err)
                goto err_request;
 
+       i915_vma_lock(batch);
        err = i915_vma_move_to_active(batch, rq, 0);
+       i915_vma_unlock(batch);
        if (err)
                goto skip_request;
 
+       i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       i915_vma_unlock(vma);
        if (err)
                goto skip_request;
 
@@ -1345,7 +1359,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
        if (err)
                goto err_request;
 
+       i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, 0);
+       i915_vma_unlock(vma);
        if (err)
                goto skip_request;
 
@@ -1440,7 +1456,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
        if (err)
                goto err_request;
 
+       i915_vma_lock(vma);
        err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       i915_vma_unlock(vma);
        if (err)
                goto skip_request;
 
@@ -1449,7 +1467,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
 
        i915_request_add(rq);
 
+       i915_gem_object_lock(obj);
        err = i915_gem_object_set_to_cpu_domain(obj, false);
+       i915_gem_object_unlock(obj);
        if (err)
                goto err;