drivers/gpu/drm/i915/gem/i915_gem_clflush.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"

static DEFINE_SPINLOCK(clflush_lock);

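/*
 * An asynchronous clflush is tracked by a struct clflush: the embedded
 * dma_fence is published as the object's exclusive fence, the i915_sw_fence
 * waits for all work already queued against the object, and the work item
 * performs the actual cacheline flush from process context.
 */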
struct clflush {
        struct dma_fence dma; /* Must be first for dma_fence_free() */
        struct i915_sw_fence wait;
        struct work_struct work;
        struct drm_i915_gem_object *obj;
};

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
        return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
        return "clflush";
}

static void i915_clflush_release(struct dma_fence *fence)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

        i915_sw_fence_fini(&clflush->wait);

        BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
        dma_fence_free(&clflush->dma);
}

static const struct dma_fence_ops i915_clflush_ops = {
        .get_driver_name = i915_clflush_get_driver_name,
        .get_timeline_name = i915_clflush_get_timeline_name,
        .release = i915_clflush_release,
};

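/*
 * Flush the CPU cachelines for every page backing the object and then
 * inform frontbuffer tracking that the CPU has finished writing to it.
 */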
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
        intel_fb_obj_flush(obj, ORIGIN_CPU);
}

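/*
 * Worker run once the nested i915_sw_fence has signalled, i.e. once all
 * work previously queued against the object has completed: pin the backing
 * pages, flush them, then signal and release the clflush dma_fence.
 */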
static void i915_clflush_work(struct work_struct *work)
{
        struct clflush *clflush = container_of(work, typeof(*clflush), work);
        struct drm_i915_gem_object *obj = clflush->obj;

        if (i915_gem_object_pin_pages(obj)) {
                DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
                goto out;
        }

        __i915_do_clflush(obj);

        i915_gem_object_unpin_pages(obj);

out:
        i915_gem_object_put(obj);

        dma_fence_signal(&clflush->dma);
        dma_fence_put(&clflush->dma);
}

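/*
 * i915_sw_fence callback: on FENCE_COMPLETE (all awaited fences have
 * signalled) kick the worker to perform the flush; on FENCE_FREE drop
 * the reference we hold on our own dma_fence.
 */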
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
                    enum i915_sw_fence_notify state)
{
        struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

        switch (state) {
        case FENCE_COMPLETE:
                schedule_work(&clflush->work);
                break;

        case FENCE_FREE:
                dma_fence_put(&clflush->dma);
                break;
        }

        return NOTIFY_DONE;
}

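/*
 * i915_gem_clflush_object() flushes the CPU cachelines of @obj if the
 * object is not already coherent with the GPU. The caller must hold the
 * object's reservation lock. With I915_CLFLUSH_SYNC the flush is performed
 * immediately; otherwise it is queued asynchronously behind any work
 * already pending on the object. I915_CLFLUSH_FORCE flushes even objects
 * that are marked as cache-coherent for reads.
 *
 * Returns false if no flush was required, true otherwise.
 */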
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        assert_object_held(obj);

        /*
         * Stolen memory is always coherent with the GPU as it is either
         * explicitly marked as write-combined by the system, or the system
         * is cache-coherent. Similarly, we only access struct pages through
         * the CPU cache, so anything not backed by physical memory we
         * consider to be always coherent and not to need clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /*
         * If the GPU is snooping the contents of the CPU cache, we do not
         * need to manually clear the CPU cache lines. However, the caches
         * are only snooped when the render cache is flushed/invalidated. As
         * we always have to emit invalidations and flushes when moving into
         * and out of the RENDER domain, correct snooping behaviour occurs
         * naturally as the result of our domain tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) &&
            obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
                return false;

        trace_i915_gem_object_clflush(obj);

        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (clflush) {
                GEM_BUG_ON(!obj->cache_dirty);

                dma_fence_init(&clflush->dma,
                               &i915_clflush_ops,
                               &clflush_lock,
                               to_i915(obj->base.dev)->mm.unordered_timeline,
                               0);
                i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

                clflush->obj = i915_gem_object_get(obj);
                INIT_WORK(&clflush->work, i915_clflush_work);

                dma_fence_get(&clflush->dma);

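                /*
                 * Wait upon everybody already using the object before we
                 * flush, and install our fence as the new exclusive fence
                 * so that subsequent users wait for the flush to complete.
                 */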
                i915_sw_fence_await_reservation(&clflush->wait,
                                                obj->base.resv, NULL,
                                                true, I915_FENCE_TIMEOUT,
                                                I915_FENCE_GFP);

                reservation_object_add_excl_fence(obj->base.resv,
                                                  &clflush->dma);

                i915_sw_fence_commit(&clflush->wait);
        } else if (obj->mm.pages) {
                __i915_do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }

        obj->cache_dirty = false;
        return true;
}
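
/*
 * Illustrative usage sketch (hypothetical caller; assumes the v5.3-era
 * i915_gem_object_lock()/unlock() helpers from gem/i915_gem_object.h):
 *
 *      i915_gem_object_lock(obj);
 *      i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *      i915_gem_object_unlock(obj);
 *
 * With I915_CLFLUSH_SYNC the cachelines are flushed before returning;
 * without it the flush is queued behind the fences already attached to
 * the object.
 */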