1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include "gt/intel_engine.h"
26
27 #include "i915_vma.h"
28
29 #include "i915_drv.h"
30 #include "i915_globals.h"
31 #include "intel_frontbuffer.h"
32
33 #include <drm/drm_gem.h>
34
35 static struct i915_global_vma {
36         struct i915_global base;
37         struct kmem_cache *slab_vmas;
38 } global;
39
40 struct i915_vma *i915_vma_alloc(void)
41 {
42         return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
43 }
44
45 void i915_vma_free(struct i915_vma *vma)
46 {
47         return kmem_cache_free(global.slab_vmas, vma);
48 }
49
50 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
51
52 #include <linux/stackdepot.h>
53
54 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
55 {
56         unsigned long *entries;
57         unsigned int nr_entries;
58         char buf[512];
59
60         if (!vma->node.stack) {
61                 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
62                                  vma->node.start, vma->node.size, reason);
63                 return;
64         }
65
66         nr_entries = stack_depot_fetch(vma->node.stack, &entries);
67         stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
68         DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
69                          vma->node.start, vma->node.size, reason, buf);
70 }
71
72 #else
73
74 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
75 {
76 }
77
78 #endif
79
80 static void obj_bump_mru(struct drm_i915_gem_object *obj)
81 {
82         struct drm_i915_private *i915 = to_i915(obj->base.dev);
83
84         spin_lock(&i915->mm.obj_lock);
85         if (obj->bind_count)
86                 list_move_tail(&obj->mm.link, &i915->mm.bound_list);
87         spin_unlock(&i915->mm.obj_lock);
88
89         obj->mm.dirty = true; /* be paranoid  */
90 }
91
92 static void __i915_vma_retire(struct i915_active *ref)
93 {
94         struct i915_vma *vma = container_of(ref, typeof(*vma), active);
95         struct drm_i915_gem_object *obj = vma->obj;
96
97         GEM_BUG_ON(!i915_gem_object_is_active(obj));
98         if (--obj->active_count)
99                 return;
100
101         /* Prune the shared fence arrays iff completely idle (inc. external) */
102         if (reservation_object_trylock(obj->resv)) {
103                 if (reservation_object_test_signaled_rcu(obj->resv, true))
104                         reservation_object_add_excl_fence(obj->resv, NULL);
105                 reservation_object_unlock(obj->resv);
106         }
107
108         /*
109          * Bump our place on the bound list to keep it roughly in LRU order
110          * so that we don't steal from recently used but inactive objects
111          * (unless we are forced to ofc!)
112          */
113         obj_bump_mru(obj);
114
115         if (i915_gem_object_has_active_reference(obj)) {
116                 i915_gem_object_clear_active_reference(obj);
117                 i915_gem_object_put(obj);
118         }
119 }
120
121 static struct i915_vma *
122 vma_create(struct drm_i915_gem_object *obj,
123            struct i915_address_space *vm,
124            const struct i915_ggtt_view *view)
125 {
126         struct i915_vma *vma;
127         struct rb_node *rb, **p;
128
129         /* The aliasing_ppgtt should never be used directly! */
130         GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
131
132         vma = i915_vma_alloc();
133         if (vma == NULL)
134                 return ERR_PTR(-ENOMEM);
135
136         i915_active_init(vm->i915, &vma->active, __i915_vma_retire);
137         INIT_ACTIVE_REQUEST(&vma->last_fence);
138
139         vma->vm = vm;
140         vma->ops = &vm->vma_ops;
141         vma->obj = obj;
142         vma->resv = obj->resv;
143         vma->size = obj->base.size;
144         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
145
146         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
147                 vma->ggtt_view = *view;
148                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
149                         GEM_BUG_ON(range_overflows_t(u64,
150                                                      view->partial.offset,
151                                                      view->partial.size,
152                                                      obj->base.size >> PAGE_SHIFT));
153                         vma->size = view->partial.size;
154                         vma->size <<= PAGE_SHIFT;
155                         GEM_BUG_ON(vma->size > obj->base.size);
156                 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
157                         vma->size = intel_rotation_info_size(&view->rotated);
158                         vma->size <<= PAGE_SHIFT;
159                 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
160                         vma->size = intel_remapped_info_size(&view->remapped);
161                         vma->size <<= PAGE_SHIFT;
162                 }
163         }
164
165         if (unlikely(vma->size > vm->total))
166                 goto err_vma;
167
168         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
169
170         if (i915_is_ggtt(vm)) {
171                 if (unlikely(overflows_type(vma->size, u32)))
172                         goto err_vma;
173
174                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
175                                                       i915_gem_object_get_tiling(obj),
176                                                       i915_gem_object_get_stride(obj));
177                 if (unlikely(vma->fence_size < vma->size || /* overflow */
178                              vma->fence_size > vm->total))
179                         goto err_vma;
180
181                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
182
183                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
184                                                                 i915_gem_object_get_tiling(obj),
185                                                                 i915_gem_object_get_stride(obj));
186                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
187
188                 vma->flags |= I915_VMA_GGTT;
189         }
190
191         spin_lock(&obj->vma.lock);
192
193         rb = NULL;
194         p = &obj->vma.tree.rb_node;
195         while (*p) {
196                 struct i915_vma *pos;
197                 long cmp;
198
199                 rb = *p;
200                 pos = rb_entry(rb, struct i915_vma, obj_node);
201
202                 /*
203                  * If the view already exists in the tree, another thread
204                  * already created a matching vma, so return the older instance
205                  * and dispose of ours.
206                  */
207                 cmp = i915_vma_compare(pos, vm, view);
208                 if (cmp == 0) {
209                         spin_unlock(&obj->vma.lock);
210                         i915_vma_free(vma);
211                         return pos;
212                 }
213
214                 if (cmp < 0)
215                         p = &rb->rb_right;
216                 else
217                         p = &rb->rb_left;
218         }
219         rb_link_node(&vma->obj_node, rb, p);
220         rb_insert_color(&vma->obj_node, &obj->vma.tree);
221
222         if (i915_vma_is_ggtt(vma))
223                 /*
224                  * We put the GGTT vma at the start of the vma-list, followed
225                  * by the ppGTT vma. This allows us to break early when
226                  * iterating over only the GGTT vma for an object, see
227                  * for_each_ggtt_vma()
228                  */
229                 list_add(&vma->obj_link, &obj->vma.list);
230         else
231                 list_add_tail(&vma->obj_link, &obj->vma.list);
232
233         spin_unlock(&obj->vma.lock);
234
235         mutex_lock(&vm->mutex);
236         list_add(&vma->vm_link, &vm->unbound_list);
237         mutex_unlock(&vm->mutex);
238
239         return vma;
240
241 err_vma:
242         i915_vma_free(vma);
243         return ERR_PTR(-E2BIG);
244 }
245
246 static struct i915_vma *
247 vma_lookup(struct drm_i915_gem_object *obj,
248            struct i915_address_space *vm,
249            const struct i915_ggtt_view *view)
250 {
251         struct rb_node *rb;
252
253         rb = obj->vma.tree.rb_node;
254         while (rb) {
255                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
256                 long cmp;
257
258                 cmp = i915_vma_compare(vma, vm, view);
259                 if (cmp == 0)
260                         return vma;
261
262                 if (cmp < 0)
263                         rb = rb->rb_right;
264                 else
265                         rb = rb->rb_left;
266         }
267
268         return NULL;
269 }
270
271 /**
272  * i915_vma_instance - return the singleton instance of the VMA
273  * @obj: parent &struct drm_i915_gem_object to be mapped
274  * @vm: address space in which the mapping is located
275  * @view: additional mapping requirements
276  *
277  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
278  * the same @view characteristics. If a match is not found, one is created.
279  * Once created, the VMA is kept until either the object is freed, or the
280  * address space is closed.
281  *
282  * Must be called with struct_mutex held.
283  *
284  * Returns the vma, or an error pointer.
285  */
286 struct i915_vma *
287 i915_vma_instance(struct drm_i915_gem_object *obj,
288                   struct i915_address_space *vm,
289                   const struct i915_ggtt_view *view)
290 {
291         struct i915_vma *vma;
292
293         GEM_BUG_ON(view && !i915_is_ggtt(vm));
294         GEM_BUG_ON(vm->closed);
295
296         spin_lock(&obj->vma.lock);
297         vma = vma_lookup(obj, vm, view);
298         spin_unlock(&obj->vma.lock);
299
300         /* vma_create() will resolve the race if another creates the vma */
301         if (unlikely(!vma))
302                 vma = vma_create(obj, vm, view);
303
304         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
305         return vma;
306 }
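/*
 * Usage sketch (illustrative only, not part of the original file, assuming
 * the usual i915_vma_pin() helper from i915_vma.h): a caller that wants the
 * default GGTT mapping of an object typically does something along the
 * lines of
 *
 *	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *
 * where "i915" stands for the caller's struct drm_i915_private. The same
 * (obj, vm, view) triple always resolves to the same vma, so callers need
 * not track the mapping themselves.
 */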
307
308 /**
309  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
310  * @vma: VMA to map
311  * @cache_level: mapping cache level
312  * @flags: flags like global or local mapping
313  *
314  * DMA addresses are taken from the scatter-gather table of this object (or of
315  * this VMA in case of non-default GGTT views) and PTE entries set up.
316  * Note that DMA addresses are also the only part of the SG table we care about.
317  */
318 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
319                   u32 flags)
320 {
321         u32 bind_flags;
322         u32 vma_flags;
323         int ret;
324
325         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
326         GEM_BUG_ON(vma->size > vma->node.size);
327
328         if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
329                                               vma->node.size,
330                                               vma->vm->total)))
331                 return -ENODEV;
332
333         if (GEM_DEBUG_WARN_ON(!flags))
334                 return -EINVAL;
335
336         bind_flags = 0;
337         if (flags & PIN_GLOBAL)
338                 bind_flags |= I915_VMA_GLOBAL_BIND;
339         if (flags & PIN_USER)
340                 bind_flags |= I915_VMA_LOCAL_BIND;
341
342         vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
343         if (flags & PIN_UPDATE)
344                 bind_flags |= vma_flags;
345         else
346                 bind_flags &= ~vma_flags;
347         if (bind_flags == 0)
348                 return 0;
349
350         GEM_BUG_ON(!vma->pages);
351
352         trace_i915_vma_bind(vma, bind_flags);
353         ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
354         if (ret)
355                 return ret;
356
357         vma->flags |= bind_flags;
358         return 0;
359 }
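/*
 * For illustration (not part of the original file): a caller binding into a
 * ppGTT passes PIN_USER, which the code above translates into
 * I915_VMA_LOCAL_BIND, e.g.
 *
 *	err = i915_vma_bind(vma, vma->obj->cache_level, PIN_USER);
 *
 * whereas PIN_GLOBAL requests a GGTT (I915_VMA_GLOBAL_BIND) binding. Bits
 * already set in vma->flags are skipped unless PIN_UPDATE forces a rewrite
 * of the existing PTEs.
 */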
360
361 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
362 {
363         void __iomem *ptr;
364         int err;
365
366         /* Access through the GTT requires the device to be awake. */
367         assert_rpm_wakelock_held(vma->vm->i915);
368
369         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
370         if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
371                 err = -ENODEV;
372                 goto err;
373         }
374
375         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
376         GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
377
378         ptr = vma->iomap;
379         if (ptr == NULL) {
380                 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
381                                         vma->node.start,
382                                         vma->node.size);
383                 if (ptr == NULL) {
384                         err = -ENOMEM;
385                         goto err;
386                 }
387
388                 vma->iomap = ptr;
389         }
390
391         __i915_vma_pin(vma);
392
393         err = i915_vma_pin_fence(vma);
394         if (err)
395                 goto err_unpin;
396
397         i915_vma_set_ggtt_write(vma);
398         return ptr;
399
400 err_unpin:
401         __i915_vma_unpin(vma);
402 err:
403         return IO_ERR_PTR(err);
404 }
405
406 void i915_vma_flush_writes(struct i915_vma *vma)
407 {
408         if (!i915_vma_has_ggtt_write(vma))
409                 return;
410
411         i915_gem_flush_ggtt_writes(vma->vm->i915);
412
413         i915_vma_unset_ggtt_write(vma);
414 }
415
416 void i915_vma_unpin_iomap(struct i915_vma *vma)
417 {
418         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
419
420         GEM_BUG_ON(vma->iomap == NULL);
421
422         i915_vma_flush_writes(vma);
423
424         i915_vma_unpin_fence(vma);
425         i915_vma_unpin(vma);
426 }
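/*
 * Illustrative pairing of the two helpers above (not part of the original
 * file): a caller holding struct_mutex, with a mappable GGTT-bound vma,
 * writes through the aperture roughly as
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	memcpy_toio(ptr + offset, src, len);
 *	i915_vma_unpin_iomap(vma);
 *
 * i915_vma_unpin_iomap() drops the pin and fence taken by
 * i915_vma_pin_iomap(); the WC mapping itself stays cached in vma->iomap and
 * is only torn down by __i915_vma_iounmap() when the vma is unbound.
 */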
427
428 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
429 {
430         struct i915_vma *vma;
431         struct drm_i915_gem_object *obj;
432
433         vma = fetch_and_zero(p_vma);
434         if (!vma)
435                 return;
436
437         obj = vma->obj;
438         GEM_BUG_ON(!obj);
439
440         i915_vma_unpin(vma);
441         i915_vma_close(vma);
442
443         if (flags & I915_VMA_RELEASE_MAP)
444                 i915_gem_object_unpin_map(obj);
445
446         __i915_gem_object_release_unless_active(obj);
447 }
448
449 bool i915_vma_misplaced(const struct i915_vma *vma,
450                         u64 size, u64 alignment, u64 flags)
451 {
452         if (!drm_mm_node_allocated(&vma->node))
453                 return false;
454
455         if (vma->node.size < size)
456                 return true;
457
458         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
459         if (alignment && !IS_ALIGNED(vma->node.start, alignment))
460                 return true;
461
462         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
463                 return true;
464
465         if (flags & PIN_OFFSET_BIAS &&
466             vma->node.start < (flags & PIN_OFFSET_MASK))
467                 return true;
468
469         if (flags & PIN_OFFSET_FIXED &&
470             vma->node.start != (flags & PIN_OFFSET_MASK))
471                 return true;
472
473         return false;
474 }
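/*
 * Note (added for illustration): PIN_OFFSET_BIAS and PIN_OFFSET_FIXED carry
 * their offset argument inside the flags themselves, masked by
 * PIN_OFFSET_MASK. A caller demanding placement at exactly 64KiB would pass,
 * for example,
 *
 *	flags = PIN_OFFSET_FIXED | SZ_64K;
 *
 * and the checks above recover that offset via flags & PIN_OFFSET_MASK.
 */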
475
476 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
477 {
478         bool mappable, fenceable;
479
480         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
481         GEM_BUG_ON(!vma->fence_size);
482
483         fenceable = (vma->node.size >= vma->fence_size &&
484                      IS_ALIGNED(vma->node.start, vma->fence_alignment));
485
486         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
487
488         if (mappable && fenceable)
489                 vma->flags |= I915_VMA_CAN_FENCE;
490         else
491                 vma->flags &= ~I915_VMA_CAN_FENCE;
492 }
493
494 static bool color_differs(struct drm_mm_node *node, unsigned long color)
495 {
496         return node->allocated && node->color != color;
497 }
498
499 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level)
500 {
501         struct drm_mm_node *node = &vma->node;
502         struct drm_mm_node *other;
503
504         /*
505          * On some machines we have to be careful when putting differing types
506          * of snoopable memory together to avoid the prefetcher crossing memory
507          * domains and dying. During vm initialisation, we decide whether or not
508          * these constraints apply and set the drm_mm.color_adjust
509          * appropriately.
510          */
511         if (vma->vm->mm.color_adjust == NULL)
512                 return true;
513
514         /* Only valid to be called on an already inserted vma */
515         GEM_BUG_ON(!drm_mm_node_allocated(node));
516         GEM_BUG_ON(list_empty(&node->node_list));
517
518         other = list_prev_entry(node, node_list);
519         if (color_differs(other, cache_level) && !drm_mm_hole_follows(other))
520                 return false;
521
522         other = list_next_entry(node, node_list);
523         if (color_differs(other, cache_level) && !drm_mm_hole_follows(node))
524                 return false;
525
526         return true;
527 }
528
529 static void assert_bind_count(const struct drm_i915_gem_object *obj)
530 {
531         /*
532          * Combine the assertion that the object is bound and that we have
533          * pinned its pages. But we should never have bound the object
534          * more than we have pinned its pages. (For complete accuracy, we
535          * assume that no one else is pinning the pages, but as a rough assertion
536          * that we will not run into problems later, this will do!)
537          */
538         GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
539 }
540
541 /**
542  * i915_vma_insert - finds a slot for the vma in its address space
543  * @vma: the vma
544  * @size: requested size in bytes (can be larger than the VMA)
545  * @alignment: required alignment
546  * @flags: mask of PIN_* flags to use
547  *
548  * First we try to allocate some free space that meets the requirements for
549  * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
550  * the VMA. Failing that, if the flags permit, we evict an old VMA,
551  * preferably the oldest idle entry, to make room for the new one.
552  * Returns:
553  * 0 on success, negative error code otherwise.
554  */
555 static int
556 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
557 {
558         struct drm_i915_private *dev_priv = vma->vm->i915;
559         unsigned int cache_level;
560         u64 start, end;
561         int ret;
562
563         GEM_BUG_ON(i915_vma_is_closed(vma));
564         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
565         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
566
567         size = max(size, vma->size);
568         alignment = max(alignment, vma->display_alignment);
569         if (flags & PIN_MAPPABLE) {
570                 size = max_t(typeof(size), size, vma->fence_size);
571                 alignment = max_t(typeof(alignment),
572                                   alignment, vma->fence_alignment);
573         }
574
575         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
576         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
577         GEM_BUG_ON(!is_power_of_2(alignment));
578
579         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
580         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
581
582         end = vma->vm->total;
583         if (flags & PIN_MAPPABLE)
584                 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
585         if (flags & PIN_ZONE_4G)
586                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
587         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
588
589         /* If binding the object/GGTT view requires more space than the entire
590          * aperture has, reject it early before evicting everything in a vain
591          * attempt to find space.
592          */
593         if (size > end) {
594                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
595                           size, flags & PIN_MAPPABLE ? "mappable" : "total",
596                           end);
597                 return -ENOSPC;
598         }
599
600         if (vma->obj) {
601                 ret = i915_gem_object_pin_pages(vma->obj);
602                 if (ret)
603                         return ret;
604
605                 cache_level = vma->obj->cache_level;
606         } else {
607                 cache_level = 0;
608         }
609
610         GEM_BUG_ON(vma->pages);
611
612         ret = vma->ops->set_pages(vma);
613         if (ret)
614                 goto err_unpin;
615
616         if (flags & PIN_OFFSET_FIXED) {
617                 u64 offset = flags & PIN_OFFSET_MASK;
618                 if (!IS_ALIGNED(offset, alignment) ||
619                     range_overflows(offset, size, end)) {
620                         ret = -EINVAL;
621                         goto err_clear;
622                 }
623
624                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
625                                            size, offset, cache_level,
626                                            flags);
627                 if (ret)
628                         goto err_clear;
629         } else {
630                 /*
631                  * We only support huge gtt pages through the 48b PPGTT,
632                  * however we also don't want to force any alignment for
633                  * objects which need to be tightly packed into the low 32bits.
634                  *
635                  * Note that we assume that the GGTT is limited to 4GiB for the
636                  * foreseeable future. See also i915_ggtt_offset().
637                  */
638                 if (upper_32_bits(end - 1) &&
639                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
640                         /*
641                          * We can't mix 64K and 4K PTEs in the same page-table
642                          * (2M block), and so to avoid the ugliness and
643                          * complexity of coloring we opt for just aligning 64K
644                          * objects to 2M.
645                          */
646                         u64 page_alignment =
647                                 rounddown_pow_of_two(vma->page_sizes.sg |
648                                                      I915_GTT_PAGE_SIZE_2M);
649
650                         /*
651                          * Check we don't expand for the limited Global GTT
652                          * (mappable aperture is even more precious!). This
653                          * also checks that we exclude the aliasing-ppgtt.
654                          */
655                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
656
657                         alignment = max(alignment, page_alignment);
658
659                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
660                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
661                 }
662
663                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
664                                           size, alignment, cache_level,
665                                           start, end, flags);
666                 if (ret)
667                         goto err_clear;
668
669                 GEM_BUG_ON(vma->node.start < start);
670                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
671         }
672         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
673         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level));
674
675         mutex_lock(&vma->vm->mutex);
676         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
677         mutex_unlock(&vma->vm->mutex);
678
679         if (vma->obj) {
680                 struct drm_i915_gem_object *obj = vma->obj;
681
682                 spin_lock(&dev_priv->mm.obj_lock);
683                 list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
684                 obj->bind_count++;
685                 spin_unlock(&dev_priv->mm.obj_lock);
686
687                 assert_bind_count(obj);
688         }
689
690         return 0;
691
692 err_clear:
693         vma->ops->clear_pages(vma);
694 err_unpin:
695         if (vma->obj)
696                 i915_gem_object_unpin_pages(vma->obj);
697         return ret;
698 }
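/*
 * Worked example (added for illustration): with PIN_ZONE_4G the search
 * window is clamped to [start, 4GiB - I915_GTT_PAGE_SIZE), and with
 * PIN_MAPPABLE to the mappable aperture, so an object whose (possibly
 * fence-padded) size exceeds that window is rejected with -ENOSPC before
 * any eviction is attempted.
 */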
699
700 static void
701 i915_vma_remove(struct i915_vma *vma)
702 {
703         struct drm_i915_private *i915 = vma->vm->i915;
704
705         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
706         GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
707
708         vma->ops->clear_pages(vma);
709
710         mutex_lock(&vma->vm->mutex);
711         drm_mm_remove_node(&vma->node);
712         list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
713         mutex_unlock(&vma->vm->mutex);
714
715         /*
716          * Since the unbound list is global, only move the object to it
717          * once no bound VMAs remain.
718          */
719         if (vma->obj) {
720                 struct drm_i915_gem_object *obj = vma->obj;
721
722                 spin_lock(&i915->mm.obj_lock);
723                 if (--obj->bind_count == 0)
724                         list_move_tail(&obj->mm.link, &i915->mm.unbound_list);
725                 spin_unlock(&i915->mm.obj_lock);
726
727                 /*
728                  * And finally now the object is completely decoupled from this
729                  * vma, we can drop its hold on the backing storage and allow
730                  * it to be reaped by the shrinker.
731                  */
732                 i915_gem_object_unpin_pages(obj);
733                 assert_bind_count(obj);
734         }
735 }
736
737 int __i915_vma_do_pin(struct i915_vma *vma,
738                       u64 size, u64 alignment, u64 flags)
739 {
740         const unsigned int bound = vma->flags;
741         int ret;
742
743         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
744         GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
745         GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
746
747         if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
748                 ret = -EBUSY;
749                 goto err_unpin;
750         }
751
752         if ((bound & I915_VMA_BIND_MASK) == 0) {
753                 ret = i915_vma_insert(vma, size, alignment, flags);
754                 if (ret)
755                         goto err_unpin;
756         }
757         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
758
759         ret = i915_vma_bind(vma, vma->obj ? vma->obj->cache_level : 0, flags);
760         if (ret)
761                 goto err_remove;
762
763         GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0);
764
765         if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
766                 __i915_vma_set_map_and_fenceable(vma);
767
768         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
769         return 0;
770
771 err_remove:
772         if ((bound & I915_VMA_BIND_MASK) == 0) {
773                 i915_vma_remove(vma);
774                 GEM_BUG_ON(vma->pages);
775                 GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK);
776         }
777 err_unpin:
778         __i915_vma_unpin(vma);
779         return ret;
780 }
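/*
 * Illustrative note (not part of the original file): callers normally reach
 * this function through the i915_vma_pin() wrapper in i915_vma.h, which
 * bumps the pin count first and only drops into this slow path when the vma
 * still needs to be inserted and/or bound, e.g.
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *	if (err)
 *		return err;
 *	// ... use the binding ...
 *	i915_vma_unpin(vma);
 */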
781
782 void i915_vma_close(struct i915_vma *vma)
783 {
784         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
785
786         GEM_BUG_ON(i915_vma_is_closed(vma));
787         vma->flags |= I915_VMA_CLOSED;
788
789         /*
790          * We defer actually closing, unbinding and destroying the VMA until
791          * the next idle point, or if the object is freed in the meantime. By
792          * postponing the unbind, we allow for it to be resurrected by the
793          * client, avoiding the work required to rebind the VMA. This is
794          * advantageous for DRI, where the client/server pass objects
795          * between themselves, temporarily opening a local VMA to the
796          * object, and then closing it again. The same object is then reused
797          * on the next frame (or two, depending on the depth of the swap queue)
798          * causing us to rebind the VMA once more. This ends up being a lot
799          * of wasted work for the steady state.
800          */
801         list_add_tail(&vma->closed_link, &vma->vm->i915->gt.closed_vma);
802 }
803
804 void i915_vma_reopen(struct i915_vma *vma)
805 {
806         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
807
808         if (vma->flags & I915_VMA_CLOSED) {
809                 vma->flags &= ~I915_VMA_CLOSED;
810                 list_del(&vma->closed_link);
811         }
812 }
813
814 static void __i915_vma_destroy(struct i915_vma *vma)
815 {
816         GEM_BUG_ON(vma->node.allocated);
817         GEM_BUG_ON(vma->fence);
818
819         GEM_BUG_ON(i915_active_request_isset(&vma->last_fence));
820
821         mutex_lock(&vma->vm->mutex);
822         list_del(&vma->vm_link);
823         mutex_unlock(&vma->vm->mutex);
824
825         if (vma->obj) {
826                 struct drm_i915_gem_object *obj = vma->obj;
827
828                 spin_lock(&obj->vma.lock);
829                 list_del(&vma->obj_link);
830                 rb_erase(&vma->obj_node, &vma->obj->vma.tree);
831                 spin_unlock(&obj->vma.lock);
832         }
833
834         i915_active_fini(&vma->active);
835
836         i915_vma_free(vma);
837 }
838
839 void i915_vma_destroy(struct i915_vma *vma)
840 {
841         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
842
843         GEM_BUG_ON(i915_vma_is_active(vma));
844         GEM_BUG_ON(i915_vma_is_pinned(vma));
845
846         if (i915_vma_is_closed(vma))
847                 list_del(&vma->closed_link);
848
849         WARN_ON(i915_vma_unbind(vma));
850         __i915_vma_destroy(vma);
851 }
852
853 void i915_vma_parked(struct drm_i915_private *i915)
854 {
855         struct i915_vma *vma, *next;
856
857         list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) {
858                 GEM_BUG_ON(!i915_vma_is_closed(vma));
859                 i915_vma_destroy(vma);
860         }
861
862         GEM_BUG_ON(!list_empty(&i915->gt.closed_vma));
863 }
864
865 static void __i915_vma_iounmap(struct i915_vma *vma)
866 {
867         GEM_BUG_ON(i915_vma_is_pinned(vma));
868
869         if (vma->iomap == NULL)
870                 return;
871
872         io_mapping_unmap(vma->iomap);
873         vma->iomap = NULL;
874 }
875
876 void i915_vma_revoke_mmap(struct i915_vma *vma)
877 {
878         struct drm_vma_offset_node *node = &vma->obj->base.vma_node;
879         u64 vma_offset;
880
881         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
882
883         if (!i915_vma_has_userfault(vma))
884                 return;
885
886         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
887         GEM_BUG_ON(!vma->obj->userfault_count);
888
889         vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
890         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
891                             drm_vma_node_offset_addr(node) + vma_offset,
892                             vma->size,
893                             1);
894
895         i915_vma_unset_userfault(vma);
896         if (!--vma->obj->userfault_count)
897                 list_del(&vma->obj->userfault_link);
898 }
899
900 static void export_fence(struct i915_vma *vma,
901                          struct i915_request *rq,
902                          unsigned int flags)
903 {
904         struct reservation_object *resv = vma->resv;
905
906         /*
907          * Ignore errors from failing to allocate the new fence, we can't
908          * handle an error right now. Worst case should be missed
909          * synchronisation leading to rendering corruption.
910          */
911         reservation_object_lock(resv, NULL);
912         if (flags & EXEC_OBJECT_WRITE)
913                 reservation_object_add_excl_fence(resv, &rq->fence);
914         else if (reservation_object_reserve_shared(resv, 1) == 0)
915                 reservation_object_add_shared_fence(resv, &rq->fence);
916         reservation_object_unlock(resv);
917 }
918
919 int i915_vma_move_to_active(struct i915_vma *vma,
920                             struct i915_request *rq,
921                             unsigned int flags)
922 {
923         struct drm_i915_gem_object *obj = vma->obj;
924
925         lockdep_assert_held(&rq->i915->drm.struct_mutex);
926         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
927
928         /*
929          * Add a reference if we're newly entering the active list.
930          * The order in which we add operations to the retirement queue is
931          * vital here: mark_active adds to the start of the callback list,
932          * such that subsequent callbacks are called first. Therefore we
933          * add the active reference first and queue for it to be dropped
934          * *last*.
935          */
936         if (!vma->active.count)
937                 obj->active_count++;
938
939         if (unlikely(i915_active_ref(&vma->active, rq->fence.context, rq))) {
940                 if (!vma->active.count)
941                         obj->active_count--;
942                 return -ENOMEM;
943         }
944
945         GEM_BUG_ON(!i915_vma_is_active(vma));
946         GEM_BUG_ON(!obj->active_count);
947
948         obj->write_domain = 0;
949         if (flags & EXEC_OBJECT_WRITE) {
950                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
951
952                 if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
953                         __i915_active_request_set(&obj->frontbuffer_write, rq);
954
955                 obj->read_domains = 0;
956         }
957         obj->read_domains |= I915_GEM_GPU_DOMAINS;
958
959         if (flags & EXEC_OBJECT_NEEDS_FENCE)
960                 __i915_active_request_set(&vma->last_fence, rq);
961
962         export_fence(vma, rq, flags);
963         return 0;
964 }
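/*
 * Illustrative usage (not part of the original file): execbuf-style callers
 * mark each vma referenced by a request as busy before submission, e.g.
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 *
 * so that the vma and its backing object are tracked as active until the
 * request signals and __i915_vma_retire() eventually runs.
 */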
965
966 int i915_vma_unbind(struct i915_vma *vma)
967 {
968         int ret;
969
970         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
971
972         /*
973          * First wait upon any activity as retiring the request may
974          * have side-effects such as unpinning or even unbinding this vma.
975          */
976         might_sleep();
977         if (i915_vma_is_active(vma)) {
978                 /*
979                  * When a closed VMA is retired, it is unbound - eek.
980                  * In order to prevent it from being recursively closed,
981                  * take a pin on the vma so that the second unbind is
982                  * aborted.
983                  *
984                  * Even more scary is that the retire callback may free
985                  * the object (last active vma). To prevent the explosion
986                  * we defer the actual object free to a worker that can
987                  * only proceed once it acquires the struct_mutex (which
988                  * we currently hold, therefore it cannot free this object
989                  * before we are finished).
990                  */
991                 __i915_vma_pin(vma);
992
993                 ret = i915_active_wait(&vma->active);
994                 if (ret)
995                         goto unpin;
996
997                 ret = i915_active_request_retire(&vma->last_fence,
998                                               &vma->vm->i915->drm.struct_mutex);
999 unpin:
1000                 __i915_vma_unpin(vma);
1001                 if (ret)
1002                         return ret;
1003         }
1004         GEM_BUG_ON(i915_vma_is_active(vma));
1005
1006         if (i915_vma_is_pinned(vma)) {
1007                 vma_print_allocator(vma, "is pinned");
1008                 return -EBUSY;
1009         }
1010
1011         if (!drm_mm_node_allocated(&vma->node))
1012                 return 0;
1013
1014         if (i915_vma_is_map_and_fenceable(vma)) {
1015                 /*
1016                  * Check that we have flushed all writes through the GGTT
1017                  * before the unbind; otherwise, due to the non-strict nature of
1018                  * those indirect writes, they may end up referencing the GGTT PTE
1019                  * after the unbind.
1020                  */
1021                 i915_vma_flush_writes(vma);
1022                 GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
1023
1024                 /* release the fence reg _after_ flushing */
1025                 ret = i915_vma_put_fence(vma);
1026                 if (ret)
1027                         return ret;
1028
1029                 /* Force a pagefault for domain tracking on next user access */
1030                 i915_vma_revoke_mmap(vma);
1031
1032                 __i915_vma_iounmap(vma);
1033                 vma->flags &= ~I915_VMA_CAN_FENCE;
1034         }
1035         GEM_BUG_ON(vma->fence);
1036         GEM_BUG_ON(i915_vma_has_userfault(vma));
1037
1038         if (likely(!vma->vm->closed)) {
1039                 trace_i915_vma_unbind(vma);
1040                 vma->ops->unbind_vma(vma);
1041         }
1042         vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
1043
1044         i915_vma_remove(vma);
1045
1046         return 0;
1047 }
1048
1049 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1050 #include "selftests/i915_vma.c"
1051 #endif
1052
1053 static void i915_global_vma_shrink(void)
1054 {
1055         kmem_cache_shrink(global.slab_vmas);
1056 }
1057
1058 static void i915_global_vma_exit(void)
1059 {
1060         kmem_cache_destroy(global.slab_vmas);
1061 }
1062
1063 static struct i915_global_vma global = { {
1064         .shrink = i915_global_vma_shrink,
1065         .exit = i915_global_vma_exit,
1066 } };
1067
1068 int __init i915_global_vma_init(void)
1069 {
1070         global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1071         if (!global.slab_vmas)
1072                 return -ENOMEM;
1073
1074         i915_global_register(&global.base);
1075         return 0;
1076 }