/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_gem_object_types.h"

#include "i915_gem_gtt.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. The object is only valid while the RCU read lock is held, and
 * note carefully that it may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
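
/*
 * Illustrative sketch, not part of the original header: the usual pattern
 * in an ioctl handler is to take a full reference with
 * i915_gem_object_lookup() and drop it with i915_gem_object_put() once the
 * access is complete. "args" is a hypothetical ioctl argument struct used
 * only for illustration.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	// ... operate on obj ...
 *
 *	i915_gem_object_put(obj);
 */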

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
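
/*
 * Illustrative sketch, not part of the original header: updates that must
 * be serialised against other users of the object's reservation object can
 * be bracketed with the lock helpers above. "update_object_state()" is a
 * hypothetical placeholder for the caller's own work.
 *
 *	i915_gem_object_lock(obj);
 *	update_object_state(obj);
 *	i915_gem_object_unlock(obj);
 */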

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
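
/*
 * Worked example, not part of the original header: with X tiling and a
 * 512 byte stride, i915_gem_tile_height() returns 8, so
 * i915_gem_object_get_tile_row_size() evaluates to 512 * 8 = 4096 bytes
 * per row of tiles; with Y tiling and the same stride it would be
 * 512 * 32 = 16384 bytes.
 */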

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
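
/*
 * Illustrative sketch, not part of the original header: callers that need
 * the backing store to stay resident pin the pages around the access and
 * unpin them afterwards.
 *
 *	int err;
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	// ... access the pages, e.g. via obj->mm.pages ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */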

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * on the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
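
/*
 * Illustrative sketch, not part of the original header: a typical CPU
 * write through the kernel mapping pins the map, copies in, flushes and
 * then unpins. "src" and "len" are hypothetical, and len must not exceed
 * obj->base.size.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, src, len);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */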

void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
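
/*
 * Illustrative sketch, not part of the original header: CPU access through
 * the page cache is usually bracketed by prepare_write()/finish_access(),
 * honouring the clflush hints returned in needs_clflush (CLFLUSH_BEFORE /
 * CLFLUSH_AFTER).
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	// ... write to the object's pages, flushing cachelines as hinted ...
 *
 *	i915_gem_object_finish_access(obj);
 */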

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

#endif