drm/i915: Remove walk over obj->vma_list for the shrinker
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 13 Oct 2017 20:26:16 +0000 (21:26 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 16 Oct 2017 19:44:19 +0000 (20:44 +0100)
In the next patch, we want to reduce the lock coverage within the
shrinker, and one of the dangerous walks we have is over obj->vma_list.
We only walk the obj->vma_list to check whether the object has been
permanently pinned by HW access, typically via use on the scanout. But
we have a couple of other long-term pins, such as the context objects,
for which we currently have to check the individual vma pin_count. If
we instead mark these using obj->pin_global (formerly obj->pin_display),
we can forgo the dangerous and sometimes slow list iteration.
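
As a rough illustration of the idea, here is a minimal sketch with
stand-in types and names (toy_obj, toy_can_release_pages) rather than
the real i915 structures: long-term users bump a single object-level
counter while they hold their pin, so the shrinker's "is this object
reclaimable?" test becomes one field read instead of a list walk.

/* Minimal sketch with stand-in names; not the i915 code. */
#include <stdbool.h>

struct toy_obj {
        unsigned int pin_global;        /* bumped by scanout, context and ring pins */
};

/*
 * Shrinker-side test: instead of iterating the object's vma list to
 * look for a permanently pinned GGTT vma, read one counter.
 */
static bool toy_can_release_pages(const struct toy_obj *obj)
{
        return obj->pin_global == 0;
}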

v2: Rearrange the code to avoid confusion from false associations
caused by the whitespace layout, and rebase on obj->pin_global.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171013202621.7276-4-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c

index b5c87d89777b2684e2220915cd63cdc18b396d9b..575a6b735f395ed98b3a2c02910306e6c8fa9d24 100644 (file)
@@ -71,25 +71,6 @@ static void shrinker_unlock(struct drm_i915_private *dev_priv, bool unlock)
        mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static bool any_vma_pinned(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-
-       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               /* Only GGTT vma may be permanently pinned, and are always
-                * at the start of the list. We can stop hunting as soon
-                * as we see a ppGTT vma.
-                */
-               if (!i915_vma_is_ggtt(vma))
-                       break;
-
-               if (i915_vma_is_pinned(vma))
-                       return true;
-       }
-
-       return false;
-}
-
 static bool swap_available(void)
 {
        return get_nr_swap_pages() > 0;
@@ -115,7 +96,13 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
        if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
                return false;
 
-       if (any_vma_pinned(obj))
+       /* If any vma are "permanently" pinned, it will prevent us from
+        * reclaiming the obj->mm.pages. We only allow scanout objects to claim
+        * a permanent pin, along with a few others like the context objects.
+        * To simplify the scan, and to avoid walking the list of vma under the
+        * object, we just check the count of its permanent pins.
+        */
+       if (obj->pin_global)
                return false;
 
        /* We can only return physical pages to the system if we can either
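
For contrast, a rough stand-alone rendering of the walk that the
removed any_vma_pinned() performed; the real code iterates
obj->vma_list with list_for_each_entry(), and the toy singly linked
list below is a stand-in. GGTT vmas sit at the head of the list, so
the scan can stop at the first ppGTT vma, but it is still a list walk
taken from the shrinker, which the single obj->pin_global read above
avoids.

/* Rough stand-in for the removed walk; not the kernel list API. */
#include <stdbool.h>

struct toy_vma {
        struct toy_vma *next;
        bool is_ggtt;                   /* GGTT vmas come first in the list */
        unsigned int pin_count;
};

struct toy_obj {
        struct toy_vma *vma_list;
};

static bool toy_any_vma_pinned(const struct toy_obj *obj)
{
        const struct toy_vma *vma;

        for (vma = obj->vma_list; vma; vma = vma->next) {
                if (!vma->is_ggtt)      /* only GGTT vmas may be permanently pinned */
                        break;
                if (vma->pin_count)
                        return true;
        }

        return false;
}
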
index 766552f2cfaea1afc0fb773ea79bbc80fab614f5..7f45dd7dc3e50cb35f0c7de71231a1e0164c388d 100644 (file)
@@ -1093,6 +1093,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
                i915_ggtt_offset(ce->ring->vma);
 
        ce->state->obj->mm.dirty = true;
+       ce->state->obj->pin_global++;
 
        i915_gem_context_get(ctx);
 out:
@@ -1120,6 +1121,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 
        intel_ring_unpin(ce->ring);
 
+       ce->state->obj->pin_global--;
        i915_gem_object_unpin_map(ce->state->obj);
        i915_vma_unpin(ce->state);
 
index b2a6cb09c6e761899ee93a1fc4fc6e760a552287..8da1bde442dd94a677335ee8f3ffc44983ee33b8 100644 (file)
@@ -1244,6 +1244,8 @@ int intel_ring_pin(struct intel_ring *ring,
        if (IS_ERR(addr))
                goto err;
 
+       vma->obj->pin_global++;
+
        ring->vaddr = addr;
        return 0;
 
@@ -1275,6 +1277,7 @@ void intel_ring_unpin(struct intel_ring *ring)
                i915_gem_object_unpin_map(ring->vma->obj);
        ring->vaddr = NULL;
 
+       ring->vma->obj->pin_global--;
        i915_vma_unpin(ring->vma);
 }
 
@@ -1439,6 +1442,7 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
                        goto err;
 
                ce->state->obj->mm.dirty = true;
+               ce->state->obj->pin_global++;
        }
 
        /* The kernel context is only used as a placeholder for flushing the
@@ -1473,8 +1477,10 @@ static void intel_ring_context_unpin(struct intel_engine_cs *engine,
        if (--ce->pin_count)
                return;
 
-       if (ce->state)
+       if (ce->state) {
+               ce->state->obj->pin_global--;
                i915_vma_unpin(ce->state);
+       }
 
        i915_gem_context_put(ctx);
 }
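
For the pin paths touched above (execlists context pin, intel_ring_pin
and the legacy ring context pin), the key property is that every
pin_global increment has a matching decrement on the corresponding
unpin path; an unbalanced pair would leave the object permanently
unshrinkable. A stand-alone illustration of that pairing, again with
stand-in names rather than the real i915 API:

/* Balanced pin/unpin pairing; names are stand-ins, not the i915 API. */
#include <assert.h>

struct toy_obj {
        unsigned int pin_global;
};

static void toy_context_pin(struct toy_obj *state)
{
        /* ... map the object, mark it dirty, etc. ... */
        state->pin_global++;            /* held while the context stays pinned */
}

static void toy_context_unpin(struct toy_obj *state)
{
        state->pin_global--;            /* dropped on the matching unpin */
        /* ... unmap, unpin the vma, etc. ... */
}

int main(void)
{
        struct toy_obj state = { 0 };

        toy_context_pin(&state);
        assert(state.pin_global == 1);  /* shrinker will skip this object */

        toy_context_unpin(&state);
        assert(state.pin_global == 0);  /* object is reclaimable again */

        return 0;
}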