/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"

static DEFINE_SPINLOCK(clflush_lock);

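/*
 * A deferred clflush is tracked by a struct clflush: @wait collects the
 * fences the flush must wait upon (everything currently in the object's
 * reservation), @work performs the flush itself, and @dma is the fence we
 * publish so that later users of the object wait for the flush to
 * complete. All clflush fences share the single clflush_lock above to
 * serialise their signalling.
 */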
struct clflush {
	struct dma_fence dma; /* Must be first for dma_fence_free() */
	struct i915_sw_fence wait;
	struct work_struct work;
	struct drm_i915_gem_object *obj;
};

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
	return "clflush";
}

static void i915_clflush_release(struct dma_fence *fence)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

	i915_sw_fence_fini(&clflush->wait);

	BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
	dma_fence_free(&clflush->dma);
}

static const struct dma_fence_ops i915_clflush_ops = {
	.get_driver_name = i915_clflush_get_driver_name,
	.get_timeline_name = i915_clflush_get_timeline_name,
	.release = i915_clflush_release,
};

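/*
 * Flush the CPU cachelines for every page backing @obj, then notify
 * frontbuffer tracking that the CPU has finished writing (ORIGIN_CPU).
 */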
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}

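/*
 * Worker callback, run once all awaited fences have signalled: pin the
 * backing pages around the flush, and signal clflush->dma even if the
 * pages could not be pinned so that waiters are never left hanging.
 */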
static void i915_clflush_work(struct work_struct *work)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_put(obj);

	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma);
}

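/*
 * i915_sw_fence notification: FENCE_COMPLETE means every awaited fence
 * has signalled, so the flush can be scheduled; FENCE_FREE means the sw
 * fence is being destroyed, so drop its reference on the dma fence.
 */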
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
		    enum i915_sw_fence_notify state)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&clflush->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&clflush->dma);
		break;
	}

	return NOTIFY_DONE;
}

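/*
 * Flush the CPU cache of @obj if its contents may be stale. Returns true
 * if a flush was performed or queued, false if the object could be
 * skipped as already coherent. With I915_CLFLUSH_SYNC the flush happens
 * immediately; otherwise it is queued asynchronously behind the fences
 * already in the object's reservation. I915_CLFLUSH_FORCE bypasses the
 * coherency check.
 */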
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	assert_object_held(obj);

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and not need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (clflush) {
		GEM_BUG_ON(!obj->cache_dirty);

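		/*
		 * The fence lives on the device's unordered timeline: a
		 * clflush is ordered only against the fences it explicitly
		 * awaits, not against other clflushes.
		 */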
		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       to_i915(obj->base.dev)->mm.unordered_timeline,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
		INIT_WORK(&clflush->work, i915_clflush_work);

		dma_fence_get(&clflush->dma);

		i915_sw_fence_await_reservation(&clflush->wait,
						obj->base.resv, NULL,
						true, I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);

		reservation_object_add_excl_fence(obj->base.resv,
						  &clflush->dma);

		i915_sw_fence_commit(&clflush->wait);
	} else if (obj->mm.pages) {
		__i915_do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}

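/*
 * Usage sketch (hypothetical caller; the object lock must already be
 * held, as asserted by assert_object_held() above):
 *
 *	if (obj->cache_dirty)
 *		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 */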