drm/i915: Use page coloring to provide the guard page at the end of the GTT
author Chris Wilson <chris@chris-wilson.co.uk>
Mon, 6 Feb 2017 08:45:47 +0000 (08:45 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Mon, 6 Feb 2017 13:46:40 +0000 (13:46 +0000)
As we now mark the reserved hole (drm_mm.head_node) with the special
UNEVICTABLE color, we can use page coloring to prevent the CS from
prefetching beyond the end of the GTT.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/20170206084547.27921-3-chris@chris-wilson.co.uk
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
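
For reference, below is a minimal userspace sketch of the coloring rule this patch installs: a color_adjust-style hook trims a free hole by one page wherever a neighbouring node carries a different color, and (after this patch) it does so even when that neighbour is the unallocated reserved node that caps the GTT. It is a standalone model only -- struct mock_node, color_adjust() and the constants in main() are illustrative stand-ins, not the drm_mm or i915 API.

/*
 * Standalone sketch (not the kernel API): models how a drm_mm-style
 * color_adjust hook leaves a guard page next to any neighbour of a
 * different color, including the unallocated hole at the end of the GTT.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ull
#define COLOR_RESERVED	(~0ul)	/* stand-in for the end-of-GTT color; value is illustrative */

struct mock_node {
	uint64_t start, size;
	unsigned long color;
	bool allocated;			/* the end-of-GTT hole is not allocated */
	struct mock_node *next;
};

/* Same shape as the patched i915_gtt_color_adjust(): shrink the hole at
 * either end when the neighbouring node has a different color. */
static void color_adjust(const struct mock_node *node, unsigned long color,
			 uint64_t *start, uint64_t *end)
{
	if (node->allocated && node->color != color)
		*start += PAGE_SIZE;

	/* The node after the hole may be the unallocated reserved node
	 * marking the end of the GTT; still back off by a page so the
	 * CS cannot prefetch across the GTT boundary. */
	node = node->next;
	if (node->color != color)
		*end -= PAGE_SIZE;
}

int main(void)
{
	/* A snooped object, then a free hole, capped by the reserved
	 * end-of-GTT node. */
	struct mock_node tail = {
		.start = 1ull << 20,
		.color = COLOR_RESERVED, .allocated = false,
	};
	struct mock_node obj = {
		.start = 0, .size = 128 * PAGE_SIZE,
		.color = 1 /* snooped */, .allocated = true, .next = &tail,
	};

	uint64_t start = obj.start + obj.size;	/* hole between obj and tail */
	uint64_t end = tail.start;

	color_adjust(&obj, 0 /* uncached */, &start, &end);

	/* One page is lost at each end: a guard after the snooped object
	 * and a guard before the end of the GTT. */
	printf("usable hole: [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
	assert(start == obj.start + obj.size + PAGE_SIZE);
	assert(end == tail.start - PAGE_SIZE);
	return 0;
}

Compiled standalone, the asserts show one page at each end of the hole becoming a guard, which is how the end-of-GTT hole now gains its guard page without shrinking vm->total as the old code did.
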
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.c

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a43e44e18042d4ada2c62d65d1969fffb2838628..6d2fcfafd3c565d3df902b9902838b893d0f8a3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -253,6 +253,9 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
        int ret = 0;
 
        lockdep_assert_held(&vm->i915->drm.struct_mutex);
+       GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+
        trace_i915_gem_evict_node(vm, target, flags);
 
        /* Retire before we search the active list. Although we have
@@ -268,9 +271,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                /* Expand search to cover neighbouring guard pages (or lack!) */
                if (start > vm->start)
                        start -= I915_GTT_PAGE_SIZE;
-               if (end < vm->start + vm->total)
-                       end += I915_GTT_PAGE_SIZE;
+
+               /* Always look at the page afterwards to avoid the end-of-GTT */
+               end += I915_GTT_PAGE_SIZE;
        }
+       GEM_BUG_ON(start >= end);
 
        drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
                /* If we find any non-objects (!vma), we cannot evict them */
@@ -279,6 +284,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
                        break;
                }
 
+               GEM_BUG_ON(!node->allocated);
                vma = container_of(node, typeof(*vma), node);
 
                /* If we are using coloring to insert guard pages between
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9f1f3bfead59c2009a0b646c577fd8007b9322b4..850d40ec77afd773a8b5bf09f1daa49d6d5e2e87 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2718,11 +2718,16 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
                                  u64 *start,
                                  u64 *end)
 {
-       if (node->color != color)
+       if (node->allocated && node->color != color)
                *start += I915_GTT_PAGE_SIZE;
 
+       /* Also leave a space between the unallocated reserved node after the
+        * GTT and any objects within the GTT, i.e. we use the color adjustment
+        * to insert a guard page to prevent prefetches crossing over the
+        * GTT boundary.
+        */
        node = list_next_entry(node, node_list);
-       if (node->allocated && node->color != color)
+       if (node->color != color)
                *end -= I915_GTT_PAGE_SIZE;
 }
 
@@ -3243,14 +3248,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
 
        INIT_LIST_HEAD(&dev_priv->vm_list);
 
-       /* Subtract the guard page before address space initialization to
-        * shrink the range used by drm_mm.
+       /* Note that we use page colouring to enforce a guard page at the
+        * end of the address space. This is required as the CS may prefetch
+        * beyond the end of the batch buffer, across the page boundary,
+        * and beyond the end of the GTT if we do not provide a guard.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
-       ggtt->base.total -= I915_GTT_PAGE_SIZE;
        i915_address_space_init(&ggtt->base, dev_priv, "[global]");
-       ggtt->base.total += I915_GTT_PAGE_SIZE;
-       if (!HAS_LLC(dev_priv))
+       if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
        mutex_unlock(&dev_priv->drm.struct_mutex);