/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"
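
/* Report whether the GGTT is idle: if any engine still has a request
 * outstanding on its GGTT timeline, evicting global entries would have
 * to stall on the GPU.
 */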
static bool ggtt_is_idle(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		struct intel_timeline *tl;

		tl = &ggtt->base.timeline.engine[engine->id];
		if (i915_gem_active_isset(&tl->last_request))
			return false;
	}

	return true;
}
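
/* Stage @vma on the eviction scan. Pinned vmas, vmas already on an
 * eviction list and, with PIN_NONFAULT, vmas in active use by a userspace
 * mmap are skipped. Returns true once drm_mm reports that enough
 * contiguous space has been found for the scan target.
 */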
static bool
mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	if (flags & PIN_NONFAULT && !list_empty(&vma->obj->userfault_link))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it only
 * ignores pinned vmas, and not objects whose backing storage itself is
 * pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
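/*
 * A minimal sketch of a typical caller (illustrative only; the real user
 * is the vma binding code): first search for a hole, and fall back to
 * eviction only when the address space is full, then retry the search.
 *
 *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *						  size, alignment, cache_level,
 *						  start, end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	if (ret == -ENOSPC) {
 *		ret = i915_gem_evict_something(vm, size, alignment,
 *					       cache_level, start, end, flags);
 *		if (ret == 0)
 *			goto search_free; // hypothetical retry label
 *	}
 */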
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct list_head eviction_list;
	struct list_head *phases[] = {
		&vm->inactive_list,
		&vm->active_list,
		NULL,
	}, **phase;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->dev->struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);
	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those in flight,
	 * on the active list, again in retirement order.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Active objects (will stall on unbinding)
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

	if (flags & PIN_NONBLOCK)
		phases[1] = NULL;

search_again:
	INIT_LIST_HEAD(&eviction_list);
	phase = phases;
	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, flags, &eviction_list))
				goto found;
	} while (*++phase);
	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		INIT_LIST_HEAD(&vma->exec_list);
	}
	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;
	if (ggtt_is_idle(dev_priv)) {
		/* If we still have pending pageflip completions, drop
		 * back to userspace to give our workqueues time to
		 * acquire our locks and unpin the old scanouts.
		 */
		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
	}
	/* Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		return ret;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev_priv);
	goto search_again;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
		if (drm_mm_scan_remove_block(&vma->node))
			__i915_vma_pin(vma);
		else
			list_del_init(&vma->exec_list);
	}
	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		list_del_init(&vma->exec_list);
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
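
/**
 * i915_gem_evict_for_vma - Evict vmas overlapping a preallocated range
 * @target: vma whose node describes the range to clear
 *
 * Walks the address space and unbinds every vma whose node overlaps
 * [target->node.start, target->node.start + target->node.size). Pinned
 * overlapping vmas cannot be evicted; depending on why they are pinned
 * this fails with -EBUSY, -EINVAL or -ENOSPC.
 */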
int
i915_gem_evict_for_vma(struct i915_vma *target)
{
	struct drm_mm_node *node, *next;

	lockdep_assert_held(&target->vm->dev->struct_mutex);
	list_for_each_entry_safe(node, next,
				 &target->vm->mm.head_node.node_list,
				 node_list) {
		struct i915_vma *vma;
		int ret;

		if (node->start + node->size <= target->node.start)
			continue;
		if (node->start >= target->node.start + target->node.size)
			break;

		vma = container_of(node, typeof(*vma), node);

		if (i915_vma_is_pinned(vma)) {
			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
				/* Object is pinned for some other use */
				return -EBUSY;

			/* We need to evict a buffer in the same batch */
			if (vma->exec_entry->flags & EXEC_OBJECT_PINNED)
				/* Overlapping fixed objects in the same batch */
				return -EINVAL;

			return -ENOSPC;
		}

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle needs to be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->dev->struct_mutex);
	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		struct drm_i915_private *dev_priv = to_i915(vm->dev);

		if (i915_is_ggtt(vm)) {
			ret = i915_gem_switch_to_kernel_context(dev_priv);
			if (ret)
				return ret;
		}

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev_priv);
		WARN_ON(!list_empty(&vm->active_list));
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
		if (!i915_vma_is_pinned(vma))
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}
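
/*
 * A minimal sketch of a last-ditch caller (illustrative only; the real
 * user is the execbuf reservation path): empty the vm, then retry binding
 * the whole batch into the now-defragmented address space.
 *
 *	ret = i915_gem_evict_vm(vm, true);
 *	if (ret)
 *		return ret;
 *	// retry reserving all execbuf objects
 */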