/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 */

#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define DBG_USE_CPU_RELOC 0 /* -1 force GTT relocs; 1 force CPU relocs */

#define  __EXEC_OBJECT_HAS_PIN		(1<<31)
#define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
#define  __EXEC_OBJECT_NEEDS_MAP	(1<<29)
#define  __EXEC_OBJECT_NEEDS_BIAS	(1<<28)
#define __EXEC_OBJECT_INTERNAL_FLAGS	(0xf<<28) /* all of the above */

#define BATCH_OFFSET_BIAS (256*1024)
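
/*
 * BATCH_OFFSET_BIAS keeps batch buffers at least 256 KiB above the start of
 * the address space. Userspace (e.g. SNA) may emit small negative relocation
 * deltas when compressing batches; biasing the batch away from offset 0
 * keeps "address + delta" from underflowing. See the note in eb_get_batch()
 * for the observed gen7 hangs that motivated this.
 */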

struct i915_execbuffer_params {
	struct drm_device               *dev;
	struct drm_file                 *file;
	struct i915_vma			*batch;
	u32				dispatch_flags;
	u32				args_batch_start_offset;
	struct intel_engine_cs          *engine;
	struct i915_gem_context         *ctx;
	struct drm_i915_gem_request     *request;
};

struct eb_vmas {
	struct drm_i915_private *i915;
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
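
/*
 * Handle lookup uses two encodings of eb->and: when userspace passes
 * I915_EXEC_HANDLE_LUT the handles are densely numbered indices, eb->and is
 * set to -buffer_count (negative) and lut[] is indexed directly; otherwise
 * eb->and holds a power-of-two-minus-one mask selecting a chain in the
 * buckets[] hash table.
 */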

static struct eb_vmas *
eb_create(struct drm_i915_private *i915,
	  struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	eb->i915 = i915;
	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
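
/*
 * A worked example of the bucket sizing above, assuming 4 KiB pages and
 * 8-byte pointers: count starts at 4096 / 8 / 2 = 256 buckets and is halved
 * while it exceeds twice the buffer count, so an execbuf with 100 buffers
 * gets 128 buckets (256 > 200, halve once; 128 <= 200, stop).
 */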

static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static struct i915_vma *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma;
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		i915_gem_object_get(obj);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_vma_instance(obj, vm, NULL);
		if (unlikely(IS_ERR(vma))) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		i915_gem_object_put(obj);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct i915_vma *vma;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each_entry(vma, head, exec_node) {
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}
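
/*
 * Lookup is therefore O(1) array indexing in the HANDLE_LUT case and a short
 * hash-chain walk otherwise: handle & eb->and selects the bucket, and
 * exec_handle disambiguates any collisions within it.
 */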

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_vma_unpin_fence(vma);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		__i915_vma_unpin(vma);

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		vma->exec_entry = NULL;
		i915_vma_put(vma);
	}
	kfree(eb);
}

static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	if (!i915_gem_object_has_struct_page(obj))
		return false;

	if (DBG_USE_CPU_RELOC)
		return DBG_USE_CPU_RELOC > 0;

	return (HAS_LLC(to_i915(obj->base.dev)) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		obj->cache_level != I915_CACHE_NONE);
}
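
/*
 * The heuristic above prefers CPU relocations whenever the write can be kept
 * coherent cheaply: on LLC platforms the cache is shared with the GPU, and
 * objects already in the CPU write domain or with a snooped cache level will
 * be flushed appropriately anyway. Everything else is first attempted
 * through the GTT aperture (see reloc_iomap()).
 */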

/* Used to convert any address to canonical form.
 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
 * addresses to be in a canonical form:
 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
 * canonical form [63:48] == [47]."
 */
#define GEN8_HIGH_ADDRESS_BIT 47
static inline uint64_t gen8_canonical_addr(uint64_t address)
{
	return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
}

static inline uint64_t gen8_noncanonical_addr(uint64_t address)
{
	return address & ((1ULL << (GEN8_HIGH_ADDRESS_BIT + 1)) - 1);
}
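
/*
 * A worked example of the 48-bit canonical form, given
 * GEN8_HIGH_ADDRESS_BIT == 47 as defined above:
 *
 *   gen8_canonical_addr(0x0000800000000000ULL) == 0xffff800000000000ULL
 *   gen8_canonical_addr(0x0000700000000000ULL) == 0x0000700000000000ULL
 *   gen8_noncanonical_addr(0xffff800000000000ULL) == 0x0000800000000000ULL
 *
 * i.e. bits [63:48] are a sign extension of bit 47 in canonical form, and
 * the non-canonical form simply masks them back off.
 */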

static inline uint64_t
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
		  uint64_t target_offset)
{
	return gen8_canonical_addr((int)reloc->delta + target_offset);
}

struct reloc_cache {
	struct drm_i915_private *i915;
	struct drm_mm_node node;
	unsigned long vaddr;
	unsigned int page;
	bool use_64bit_reloc;
};

static void reloc_cache_init(struct reloc_cache *cache,
			     struct drm_i915_private *i915)
{
	cache->page = -1;
	cache->vaddr = 0;
	cache->i915 = i915;
	/* Must be a variable in the struct to allow GCC to unroll. */
	cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
	cache->node.allocated = false;
}

static inline void *unmask_page(unsigned long p)
{
	return (void *)(uintptr_t)(p & PAGE_MASK);
}

static inline unsigned int unmask_flags(unsigned long p)
{
	return p & ~PAGE_MASK;
}

#define KMAP 0x4 /* after CLFLUSH_FLAGS */
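
/*
 * cache->vaddr packs state into a single unsigned long: the page-aligned
 * kernel address in the upper bits (recovered by unmask_page()) and flag
 * bits below PAGE_MASK (recovered by unmask_flags()), namely the
 * CLFLUSH_BEFORE/CLFLUSH_AFTER flushes plus KMAP to distinguish a
 * kmap_atomic() mapping from an io_mapping of the GTT aperture.
 */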

static void reloc_cache_fini(struct reloc_cache *cache)
{
	void *vaddr;

	if (!cache->vaddr)
		return;

	vaddr = unmask_page(cache->vaddr);
	if (cache->vaddr & KMAP) {
		if (cache->vaddr & CLFLUSH_AFTER)
			mb();

		kunmap_atomic(vaddr);
		i915_gem_obj_finish_shmem_access((struct drm_i915_gem_object *)cache->node.mm);
	} else {
		wmb();
		io_mapping_unmap_atomic((void __iomem *)vaddr);
		if (cache->node.allocated) {
			struct i915_ggtt *ggtt = &cache->i915->ggtt;

			ggtt->base.clear_range(&ggtt->base,
					       cache->node.start,
					       cache->node.size);
			drm_mm_remove_node(&cache->node);
		} else {
			i915_vma_unpin((struct i915_vma *)cache->node.mm);
		}
	}
}

static void *reloc_kmap(struct drm_i915_gem_object *obj,
			struct reloc_cache *cache,
			int page)
{
	void *vaddr;

	if (cache->vaddr) {
		kunmap_atomic(unmask_page(cache->vaddr));
	} else {
		unsigned int flushes;
		int ret;

		ret = i915_gem_obj_prepare_shmem_write(obj, &flushes);
		if (ret)
			return ERR_PTR(ret);

		BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
		BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);

		cache->vaddr = flushes | KMAP;
		cache->node.mm = (void *)obj;
		if (flushes)
			mb();
	}

	vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
	cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
	cache->page = page;

	return vaddr;
}

static void *reloc_iomap(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	struct i915_ggtt *ggtt = &cache->i915->ggtt;
	unsigned long offset;
	void *vaddr;

	if (cache->vaddr) {
		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
	} else {
		struct i915_vma *vma;
		int ret;

		if (use_cpu_reloc(obj))
			return NULL;

		ret = i915_gem_object_set_to_gtt_domain(obj, true);
		if (ret)
			return ERR_PTR(ret);

		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
					       PIN_MAPPABLE | PIN_NONBLOCK);
		if (IS_ERR(vma)) {
			memset(&cache->node, 0, sizeof(cache->node));
			ret = drm_mm_insert_node_in_range
				(&ggtt->base.mm, &cache->node,
				 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
				 0, ggtt->mappable_end,
				 DRM_MM_INSERT_LOW);
			if (ret) /* no inactive aperture space, use cpu reloc */
				return NULL;
		} else {
			ret = i915_vma_put_fence(vma);
			if (ret) {
				i915_vma_unpin(vma);
				return ERR_PTR(ret);
			}

			cache->node.start = vma->node.start;
			cache->node.mm = (void *)vma;
		}
	}

	offset = cache->node.start;
	if (cache->node.allocated) {
		wmb();
		ggtt->base.insert_page(&ggtt->base,
				       i915_gem_object_get_dma_address(obj, page),
				       offset, I915_CACHE_NONE, 0);
	} else {
		offset += page << PAGE_SHIFT;
	}

	vaddr = (void __force *) io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
	cache->page = page;
	cache->vaddr = (unsigned long)vaddr;

	return vaddr;
}

static void *reloc_vaddr(struct drm_i915_gem_object *obj,
			 struct reloc_cache *cache,
			 int page)
{
	void *vaddr;

	if (cache->page == page) {
		vaddr = unmask_page(cache->vaddr);
	} else {
		vaddr = NULL;
		if ((cache->vaddr & KMAP) == 0)
			vaddr = reloc_iomap(obj, cache, page);
		if (!vaddr)
			vaddr = reloc_kmap(obj, cache, page);
	}

	return vaddr;
}
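
/*
 * reloc_vaddr() thus reuses the currently mapped page when possible, and
 * otherwise tries the GTT iomap first (unless the cache is already in KMAP
 * mode), falling back to kmap_atomic() when no aperture space is available
 * or a CPU relocation is preferable.
 */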

static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
{
	if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
		if (flushes & CLFLUSH_BEFORE) {
			clflushopt(addr);
			mb();
		}

		*addr = value;

		/* Writes to the same cacheline are serialised by the CPU
		 * (including clflush). On the write path, we only require
		 * that it hits memory in an orderly fashion and place
		 * mb barriers at the start and end of the relocation phase
		 * to ensure ordering of clflush wrt the system.
		 */
		if (flushes & CLFLUSH_AFTER)
			clflushopt(addr);
	} else
		*addr = value;
}

static int
relocate_entry(struct drm_i915_gem_object *obj,
	       const struct drm_i915_gem_relocation_entry *reloc,
	       struct reloc_cache *cache,
	       u64 target_offset)
{
	u64 offset = reloc->offset;
	bool wide = cache->use_64bit_reloc;
	void *vaddr;

	target_offset = relocation_target(reloc, target_offset);
repeat:
	vaddr = reloc_vaddr(obj, cache, offset >> PAGE_SHIFT);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	clflush_write32(vaddr + offset_in_page(offset),
			lower_32_bits(target_offset),
			cache->vaddr);

	if (wide) {
		offset += sizeof(u32);
		target_offset >>= 32;
		wide = false;
		goto repeat;
	}

	return 0;
}
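
/*
 * On platforms with 64-bit relocations (cache->use_64bit_reloc) the loop
 * above runs twice: the low 32 bits of the canonical target address are
 * written at reloc->offset, then offset advances by sizeof(u32) and the
 * high 32 bits are written, remapping into the next page if the pair of
 * writes straddles a page boundary.
 */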

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc,
				   struct reloc_cache *cache)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = gen8_canonical_addr(target_vma->node.start);

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev_priv) &&
		     reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
		ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
				    PIN_GLOBAL);
		if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
			return ret;
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		     obj->base.size - (cache->use_64bit_reloc ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	ret = relocate_entry(obj, reloc, cache, target_offset);
	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;
	return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int remain, ret = 0;

	user_relocs = u64_to_user_ptr(entry->relocs_ptr);
	reloc_cache_init(&cache, eb->i915);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		unsigned long unwritten;
		unsigned int count;

		count = min_t(unsigned int, remain, ARRAY_SIZE(stack_reloc));
		remain -= count;

		/* This is the fast path and we cannot handle a pagefault
		 * whilst holding the struct mutex lest the user pass in the
		 * relocations contained within a mmaped bo. In such a case
		 * the page fault handler would call i915_gem_fault() and we
		 * would try to acquire the struct mutex again. Obviously
		 * this is bad and so lockdep complains vehemently.
		 */
		pagefault_disable();
		unwritten = __copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0]));
		pagefault_enable();
		if (unlikely(unwritten)) {
			ret = -EFAULT;
			goto out;
		}

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r, &cache);
			if (ret)
				goto out;

			if (r->presumed_offset != offset) {
				pagefault_disable();
				unwritten = __put_user(r->presumed_offset,
						       &user_relocs->presumed_offset);
				pagefault_enable();
				if (unlikely(unwritten)) {
					/* Note that reporting an error now
					 * leaves everything in an inconsistent
					 * state as we have *already* changed
					 * the relocation value inside the
					 * object. As we have not changed the
					 * reloc.presumed_offset or will not
					 * change the execobject.offset, on the
					 * next execbuffer call we may not
					 * rewrite the value inside the object,
					 * leaving it dangling and causing a
					 * GPU hang.
					 */
					ret = -EFAULT;
					goto out;
				}
			}

			user_relocs++;
			r++;
		} while (--count);
	}

out:
	reloc_cache_fini(&cache);
	return ret;
#undef N_RELOC
}
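
/*
 * The relocations are copied in bounded chunks: with a 512-byte stack buffer
 * and sizeof(struct drm_i915_gem_relocation_entry) == 32 (four u32s and two
 * u64s), N_RELOC(512) yields 16 entries per __copy_from_user_inatomic()
 * call.
 */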

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct reloc_cache cache;
	int i, ret = 0;

	reloc_cache_init(&cache, eb->i915);
	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i], &cache);
		if (ret)
			break;
	}
	reloc_cache_fini(&cache);
	return ret;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}

	return ret;
}

static bool only_mappable_for_reloc(unsigned int flags)
{
	return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
		__EXEC_OBJECT_NEEDS_MAP;
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *engine,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	uint64_t flags;
	int ret;

	flags = PIN_USER;
	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;

	if (!drm_mm_node_allocated(&vma->node)) {
		/* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
		 * limit address to the first 4GBs for unflagged objects.
		 */
		if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
			flags |= PIN_ZONE_4G;
		if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
			flags |= PIN_GLOBAL | PIN_MAPPABLE;
		if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
			flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
		if (entry->flags & EXEC_OBJECT_PINNED)
			flags |= entry->offset | PIN_OFFSET_FIXED;
		if ((flags & PIN_MAPPABLE) == 0)
			flags |= PIN_HIGH;
	}

	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if ((ret == -ENOSPC || ret == -E2BIG) &&
	    only_mappable_for_reloc(entry->flags))
		ret = i915_vma_pin(vma,
				   entry->pad_to_size,
				   entry->alignment,
				   flags & ~PIN_MAPPABLE);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
		ret = i915_vma_get_fence(vma);
		if (ret)
			return ret;

		if (i915_vma_pin_fence(vma))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}

static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	if (entry->relocation_count == 0)
		return false;

	if (!i915_vma_is_ggtt(vma))
		return false;

	/* See also use_cpu_reloc() */
	if (HAS_LLC(to_i915(vma->obj->base.dev)))
		return false;

	if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	return true;
}

static bool
eb_vma_misplaced(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;

	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
		!i915_vma_is_ggtt(vma));

	if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
		return true;

	if (vma->node.size < entry->pad_to_size)
		return true;

	if (entry->flags & EXEC_OBJECT_PINNED &&
	    vma->node.start != entry->offset)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	/* avoid costly ping-pong once a batch bo ended up non-mappable */
	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
	    !i915_vma_is_map_and_fenceable(vma))
		return !only_mappable_for_reloc(entry->flags);

	if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
	    (vma->node.start + vma->node.size - 1) >> 32)
		return true;

	return false;
}

static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
			    struct list_head *vmas,
			    struct i915_gem_context *ctx,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	struct list_head pinned_vmas;
	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
	int retry;

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	INIT_LIST_HEAD(&pinned_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		if (ctx->flags & CONTEXT_NO_ZEROMAP)
			entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

		if (!has_fenced_gpu_access)
			entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
		need_fence =
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			i915_gem_object_is_tiled(obj);
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (entry->flags & EXEC_OBJECT_PINNED)
			list_move_tail(&vma->exec_list, &pinned_vmas);
		else if (need_mappable) {
			entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
			list_move(&vma->exec_list, &ordered_vmas);
		} else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
	}
	list_splice(&ordered_vmas, vmas);
	list_splice(&pinned_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma,
								      engine,
								      need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
							      need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *engine,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec,
				  struct i915_gem_context *ctx)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		i915_vma_put(vma);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
			continue;

		ret = i915_gem_request_await_object
			(req, obj, obj->base.pending_write_domain);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate GPU caches and TLBs. */
	return req->engine->emit_flush(req, EMIT_INVALIDATE);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	/* Kernel clipping was a DRI1 misfeature */
	if (exec->num_cliprects || exec->cliprects_ptr)
		return false;

	if (exec->DR4 == 0xffffffff) {
		DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
		exec->DR4 = 0;
	}
	if (exec->DR1 || exec->DR4)
		return false;

	if ((exec->batch_start_offset | exec->batch_len) & 0x7)
		return false;

	return true;
}

static int
validate_exec_list(struct drm_device *dev,
		   struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
	unsigned invalid_flags;
	int i;

	/* INTERNAL flags must not overlap with external ones */
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS & ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	if (USES_FULL_PPGTT(dev))
		invalid_flags |= EXEC_OBJECT_NEEDS_GTT;

	for (i = 0; i < count; i++) {
		char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & invalid_flags)
			return -EINVAL;

		/* Offset can be used as input (EXEC_OBJECT_PINNED), reject
		 * any non-page-aligned or non-canonical addresses.
		 */
		if (exec[i].flags & EXEC_OBJECT_PINNED) {
			if (exec[i].offset !=
			    gen8_canonical_addr(exec[i].offset & PAGE_MASK))
				return -EINVAL;
		}

		/* From drm_mm perspective address space is continuous,
		 * so from this point we're always using non-canonical
		 * form internally.
		 */
		exec[i].offset = gen8_noncanonical_addr(exec[i].offset);

		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
			return -EINVAL;

		/* pad_to_size was once a reserved field, so sanitize it */
		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
			if (offset_in_page(exec[i].pad_to_size))
				return -EINVAL;
		} else {
			exec[i].pad_to_size = 0;
		}

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_pages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}
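
/*
 * The relocs_max check above bounds the slow path's single allocation: with
 * 32-byte relocation entries, relocs_max is UINT_MAX / 32 (roughly 134
 * million entries), so the total relocation array size can never overflow
 * when i915_gem_execbuffer_relocate_slow() copies the whole tree into one
 * array.
 */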

static struct i915_gem_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *engine, const u32 ctx_id)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	if (i915_gem_context_is_banned(ctx)) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

void i915_vma_move_to_active(struct i915_vma *vma,
			     struct drm_i915_gem_request *req,
			     unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	const unsigned int idx = req->engine->id;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	/* Add a reference if we're newly entering the active list.
	 * The order in which we add operations to the retirement queue is
	 * vital here: mark_active adds to the start of the callback list,
	 * such that subsequent callbacks are called first. Therefore we
	 * add the active reference first and queue for it to be dropped
	 * *last*.
	 */
	if (!i915_vma_is_active(vma))
		obj->active_count++;
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);
	list_move_tail(&vma->vm_link, &vma->vm->active_list);

	if (flags & EXEC_OBJECT_WRITE) {
		if (intel_fb_obj_invalidate(obj, ORIGIN_CS))
			i915_gem_active_set(&obj->frontbuffer_write, req);

		/* update for the implicit flush after a batch */
		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		if (!obj->cache_dirty && gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE)
		i915_gem_active_set(&vma->last_fence, req);
}

static void eb_export_fence(struct drm_i915_gem_object *obj,
			    struct drm_i915_gem_request *req,
			    unsigned int flags)
{
	struct reservation_object *resv = obj->resv;

	/* Ignore errors from failing to allocate the new fence, we can't
	 * handle an error right now. Worst case should be missed
	 * synchronisation leading to rendering corruption.
	 */
	reservation_object_lock(resv, NULL);
	if (flags & EXEC_OBJECT_WRITE)
		reservation_object_add_excl_fence(resv, &req->fence);
	else if (reservation_object_reserve_shared(resv) == 0)
		reservation_object_add_shared_fence(resv, &req->fence);
	reservation_object_unlock(resv);
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct drm_i915_gem_request *req)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain)
			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
		else
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;

		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
		eb_export_fence(obj, req, vma->exec_entry->flags);
		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}

static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
	u32 *cs;
	int i;

	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	cs = intel_ring_begin(req, 4 * 3);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < 4; i++) {
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
		*cs++ = 0;
	}

	intel_ring_advance(req, cs);

	return 0;
}

static struct i915_vma *
i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
			  struct drm_i915_gem_object *batch_obj,
			  struct eb_vmas *eb,
			  u32 batch_start_offset,
			  u32 batch_len,
			  bool is_master)
{
	struct drm_i915_gem_object *shadow_batch_obj;
	struct i915_vma *vma;
	int ret;

	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
						   PAGE_ALIGN(batch_len));
	if (IS_ERR(shadow_batch_obj))
		return ERR_CAST(shadow_batch_obj);

	ret = intel_engine_cmd_parser(engine,
				      batch_obj,
				      shadow_batch_obj,
				      batch_start_offset,
				      batch_len,
				      is_master);
	if (ret) {
		if (ret == -EACCES) /* unhandled chained batch */
			vma = NULL;
		else
			vma = ERR_PTR(ret);
		goto out;
	}

	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		goto out;

	memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));

	vma->exec_entry = shadow_exec_entry;
	vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
	i915_gem_object_get(shadow_batch_obj);
	list_add_tail(&vma->exec_list, &eb->vmas);

out:
	i915_gem_object_unpin_pages(shadow_batch_obj);
	return vma;
}

static int
execbuf_submit(struct i915_execbuffer_params *params,
	       struct drm_i915_gem_execbuffer2 *args,
	       struct list_head *vmas)
{
	struct drm_i915_private *dev_priv = params->request->i915;
	u64 exec_start, exec_len;
	int instp_mode;
	u32 instp_mask, *cs;
	int ret;

	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
	if (ret)
		return ret;

	ret = i915_switch_context(params->request);
	if (ret)
		return ret;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && params->engine->id != RCS) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			return -EINVAL;
		}

		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev_priv)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				return -EINVAL;
			}

			if (INTEL_INFO(dev_priv)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				return -EINVAL;
			}

			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev_priv)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		return -EINVAL;
	}

	if (params->engine->id == RCS &&
	    instp_mode != dev_priv->relative_constants_mode) {
		cs = intel_ring_begin(params->request, 4);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_NOOP;
		*cs++ = MI_LOAD_REGISTER_IMM(1);
		*cs++ = i915_mmio_reg_offset(INSTPM);
		*cs++ = instp_mask << 16 | instp_mode;
		intel_ring_advance(params->request, cs);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(params->request);
		if (ret)
			return ret;
	}

	exec_len   = args->batch_len;
	exec_start = params->batch->node.start +
		     params->args_batch_start_offset;

	if (exec_len == 0)
		exec_len = params->batch->size - params->args_batch_start_offset;

	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);
	if (ret)
		return ret;

	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);

	i915_gem_execbuffer_move_to_active(vmas, params->request);

	return 0;
}

/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The engine index is returned.
 */
static unsigned int
gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
			 struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv has already selected one ring. */
	if ((int)file_priv->bsd_engine < 0)
		file_priv->bsd_engine = atomic_fetch_xor(1,
			 &dev_priv->mm.bsd_engine_dispatch_index);

	return file_priv->bsd_engine;
}
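
/*
 * The atomic_fetch_xor(1, ...) above ping-pongs the shared dispatch index
 * between 0 and 1, so files that do not request a specific BSD ring are
 * spread across the two VCS engines in roughly alternating order; the
 * choice is then sticky for the lifetime of the file.
 */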

#define I915_USER_RINGS (4)

static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
	[I915_EXEC_DEFAULT]	= RCS,
	[I915_EXEC_RENDER]	= RCS,
	[I915_EXEC_BLT]		= BCS,
	[I915_EXEC_BSD]		= VCS,
	[I915_EXEC_VEBOX]	= VECS
};

static struct intel_engine_cs *
eb_select_engine(struct drm_i915_private *dev_priv,
		 struct drm_file *file,
		 struct drm_i915_gem_execbuffer2 *args)
{
	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
	struct intel_engine_cs *engine;

	if (user_ring_id > I915_USER_RINGS) {
		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
		return NULL;
	}

	if ((user_ring_id != I915_EXEC_BSD) &&
	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
		DRM_DEBUG("execbuf with non bsd ring but with invalid "
			  "bsd dispatch flags: %d\n", (int)(args->flags));
		return NULL;
	}

	if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
			   bsd_idx <= I915_EXEC_BSD_RING2) {
			bsd_idx >>= I915_EXEC_BSD_SHIFT;
			bsd_idx--;
		} else {
			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
				  bsd_idx);
			return NULL;
		}

		engine = dev_priv->engine[_VCS(bsd_idx)];
	} else {
		engine = dev_priv->engine[user_ring_map[user_ring_id]];
	}

	if (!engine) {
		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
		return NULL;
	}

	return engine;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct eb_vmas *eb;
	struct drm_i915_gem_exec_object2 shadow_exec_entry;
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
	struct i915_execbuffer_params *params = &params_master;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u32 dispatch_flags;
	struct dma_fence *in_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(dev, exec, args->buffer_count);
	if (ret)
		return ret;

	dispatch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		dispatch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		dispatch_flags |= I915_DISPATCH_PINNED;

	engine = eb_select_engine(dev_priv, file, args);
	if (!engine)
		return -EINVAL;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
		if (!HAS_RESOURCE_STREAMER(dev_priv)) {
			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
			return -EINVAL;
		}
		if (engine->id != RCS) {
			DRM_DEBUG("RS is not available on %s\n",
				 engine->name);
			return -EINVAL;
		}

		dispatch_flags |= I915_DISPATCH_RS;
	}

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_in_fence;
		}
	}

	/* Take a local wakeref for preparing to dispatch the execbuf as
	 * we expect to access the hardware fairly frequently in the
	 * process. Upon first dispatch, we acquire another prolonged
	 * wakeref that we hold until the GPU has been idle for at least
	 * 100ms.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_get(ctx);

	if (ctx->ppgtt)
		vm = &ctx->ppgtt->base;
	else
		vm = &ggtt->base;

	memset(&params_master, 0x00, sizeof(params_master));

	eb = eb_create(dev_priv, args);
	if (eb == NULL) {
		i915_gem_context_put(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	params->batch = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
					  &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
								engine,
								eb, exec, ctx);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (params->batch->obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	if (args->batch_start_offset > params->batch->size ||
	    args->batch_len > params->batch->size - args->batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		ret = -EINVAL;
		goto err;
	}

	params->args_batch_start_offset = args->batch_start_offset;
	if (engine->needs_cmd_parser && args->batch_len) {
		struct i915_vma *vma;

		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
						params->batch->obj,
						eb,
						args->batch_start_offset,
						args->batch_len,
						drm_is_current_master(file));
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			dispatch_flags |= I915_DISPATCH_SECURE;
			params->args_batch_start_offset = 0;
			params->batch = vma;
		}
	}

	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (dispatch_flags & I915_DISPATCH_SECURE) {
		struct drm_i915_gem_object *obj = params->batch->obj;
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto err;
		}

		params->batch = vma;
	}

	/* Allocate a request for this batch buffer nice and early. */
	params->request = i915_gem_request_alloc(engine, ctx);
	if (IS_ERR(params->request)) {
		ret = PTR_ERR(params->request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		ret = i915_gem_request_await_dma_fence(params->request,
						       in_fence);
		if (ret < 0)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&params->request->fence);
		if (!out_fence) {
			ret = -ENOMEM;
			goto err_request;
		}
	}

	/* Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	params->request->batch = params->batch;

	ret = i915_gem_request_add_to_client(params->request, file);
	if (ret)
		goto err_request;

	/*
	 * Save assorted stuff away to pass through to *_submission().
	 * NB: This data should be 'persistent' and not local as it will be
	 * kept around beyond the duration of the IOCTL once the GPU
	 * scheduler arrives.
	 */
	params->dev                     = dev;
	params->file                    = file;
	params->engine                  = engine;
	params->dispatch_flags          = dispatch_flags;
	params->ctx                     = ctx;

	ret = execbuf_submit(params, args, &eb->vmas);
err_request:
	__i915_add_request(params->request, ret == 0);
	if (out_fence) {
		if (ret == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	/*
	 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
	 * batch vma for correctness. To make this less ugly and fragile, it
	 * needs to be adjusted to also track the ggtt batch vma properly as
	 * active.
	 */
	if (dispatch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(params->batch);
err:
	/* the request owns the ref now */
	i915_gem_context_put(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_in_fence:
	dma_fence_put(in_fence);
	return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	exec2_list = drm_malloc_gfp(args->buffer_count,
				    sizeof(*exec2_list),
				    GFP_TEMPORARY);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset);
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}