/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_gem_object_types.h"

#include "i915_gem_gtt.h"
void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
				       const void *data, size_t size);
extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}
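/*
 * Example (an illustrative sketch, not part of this header): a typical
 * ioctl handler resolves a userspace handle to a full reference and
 * drops that reference with i915_gem_object_put() when done. The
 * surrounding control flow here is hypothetical.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_lookup(file, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... use obj; the reference keeps it alive ...
 *
 *	i915_gem_object_put(obj);
 */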
__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);
__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_get(&obj->base);
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_put(&obj->base);
}
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
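/*
 * Illustrative sketch: the reservation object is the lock guarding the
 * object's fences, so code publishing or inspecting fences brackets the
 * access with these helpers (the callee below is hypothetical).
 *
 *	i915_gem_object_lock(obj);
 *	update_object_fences(obj);
 *	i915_gem_object_unlock(obj);
 */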
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}
static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}
static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
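/*
 * Worked example of the arithmetic above (illustrative): with a
 * 512 byte stride, an X-tiled object (tile height 8) has a tile row of
 * 512 * 8 = 4096 bytes, while Y tiling (tile height 32) gives
 * 512 * 32 = 16384 bytes.
 */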
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}
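/*
 * Illustrative usage sketch: pin the backing store before touching the
 * pages and unpin when finished; the error handling shown is
 * hypothetical.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... the pages cannot be reaped while pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */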
enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};
/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
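/*
 * Illustrative sketch of the contract documented above; the clear and
 * the error handling are hypothetical. i915_gem_object_flush_map()
 * (declared below) flushes the CPU writes before the GPU reads them.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */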
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}
/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}
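/*
 * Illustrative sketch of the prepare/finish protocol: prepare_write()
 * pins the pages and reports, via the CLFLUSH_* flags, whether the CPU
 * caches must be flushed around the access. vaddr, src and len here
 * are hypothetical.
 *
 *	unsigned int needs_clflush;
 *	int err;
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *
 *	if (needs_clflush & CLFLUSH_BEFORE)
 *		drm_clflush_virt_range(vaddr, len);
 *	memcpy(vaddr, src, len);
 *	if (needs_clflush & CLFLUSH_AFTER)
 *		drm_clflush_virt_range(vaddr, len);
 *
 *	i915_gem_object_finish_access(obj);
 */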
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

#endif