asedeno.scripts.mit.edu Git - linux.git/blobdiff - drivers/gpu/drm/i915/gem/i915_gem_object.c
Merge tag 'usercopy-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux
[linux.git] / drivers / gpu / drm / i915 / gem / i915_gem_object.c
index afd75b85da1d9e1ba14171cf61d5d5f3bc9fcc00..d7855dc5a5c57b9a844f3b8fa10cc7d979e4d93c 100644 (file)
@@ -29,6 +29,7 @@
 #include "i915_gem_context.h"
 #include "i915_gem_object.h"
 #include "i915_globals.h"
+#include "i915_trace.h"
 
 static struct i915_global_object {
        struct i915_global base;
@@ -45,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
        return kmem_cache_free(global.slab_objects, obj);
 }
 
-static void
-frontbuffer_retire(struct i915_active_request *active,
-                  struct i915_request *request)
-{
-       struct drm_i915_gem_object *obj =
-               container_of(active, typeof(*obj), frontbuffer_write);
-
-       intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops)
 {
@@ -63,17 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);
 
+       INIT_LIST_HEAD(&obj->mm.link);
+
        INIT_LIST_HEAD(&obj->lut_list);
-       INIT_LIST_HEAD(&obj->batch_pool_link);
 
        init_rcu_head(&obj->rcu);
 
        obj->ops = ops;
 
-       obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-       i915_active_request_init(&obj->frontbuffer_write,
-                                NULL, frontbuffer_retire);
-
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
@@ -185,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
                GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
-               GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
                GEM_BUG_ON(!list_empty(&obj->lut_list));
 
                atomic_set(&obj->mm.pages_pin_count, 0);
@@ -209,48 +196,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-       struct llist_node *freed;
-
-       /* Free the oldest, most stale object to keep the free_list short */
-       freed = NULL;
-       if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-               /* Only one consumer of llist_del_first() allowed */
-               spin_lock(&i915->mm.free_lock);
-               freed = llist_del_first(&i915->mm.free_list);
-               spin_unlock(&i915->mm.free_lock);
-       }
-       if (unlikely(freed)) {
-               freed->next = NULL;
+       struct llist_node *freed = llist_del_all(&i915->mm.free_list);
+
+       if (unlikely(freed))
                __i915_gem_free_objects(i915, freed);
-       }
 }
 
 static void __i915_gem_free_work(struct work_struct *work)
 {
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);
-       struct llist_node *freed;
 
-       /*
-        * All file-owned VMA should have been released by this point through
-        * i915_gem_close_object(), or earlier by i915_gem_context_close().
-        * However, the object may also be bound into the global GTT (e.g.
-        * older GPUs without per-process support, or for direct access through
-        * the GTT either for the user or for scanout). Those VMA still need to
-        * unbound now.
-        */
-
-       spin_lock(&i915->mm.free_lock);
-       while ((freed = llist_del_all(&i915->mm.free_list))) {
-               spin_unlock(&i915->mm.free_lock);
-
-               __i915_gem_free_objects(i915, freed);
-               if (need_resched())
-                       return;
-
-               spin_lock(&i915->mm.free_lock);
-       }
-       spin_unlock(&i915->mm.free_lock);
+       i915_gem_flush_free_objects(i915);
 }
 
 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -258,6 +215,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+       GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
@@ -273,14 +232,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
         * or else we may oom whilst there are plenty of deferred
         * freed objects.
         */
-       if (i915_gem_object_has_pages(obj) &&
-           i915_gem_object_is_shrinkable(obj)) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&i915->mm.obj_lock, flags);
-               list_del_init(&obj->mm.link);
-               spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-       }
+       i915_gem_object_make_unshrinkable(obj);
 
        /*
         * Since we require blocking on struct_mutex to unbind the freed
@@ -296,13 +248,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                queue_work(i915->wq, &i915->mm.free_work);
 }
 
-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-       return (domain == I915_GEM_DOMAIN_GTT ?
-               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
        return !(obj->cache_level == I915_CACHE_NONE ||
@@ -325,8 +270,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                for_each_ggtt_vma(vma, obj)
                        intel_gt_flush_ggtt_writes(vma->vm->gt);
 
-               intel_fb_obj_flush(obj,
-                                  fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
 
                for_each_ggtt_vma(vma, obj) {
                        if (vma->iomap)