2 * SPDX-License-Identifier: MIT
4 * Copyright © 2008,2010 Intel Corporation
7 #include <linux/intel-iommu.h>
8 #include <linux/dma-resv.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
12 #include <drm/drm_syncobj.h>
13 #include <drm/i915_drm.h>
15 #include "display/intel_frontbuffer.h"
17 #include "gem/i915_gem_ioctls.h"
18 #include "gt/intel_context.h"
19 #include "gt/intel_engine_pool.h"
20 #include "gt/intel_gt.h"
21 #include "gt/intel_gt_pm.h"
24 #include "i915_gem_clflush.h"
25 #include "i915_gem_context.h"
26 #include "i915_gem_ioctls.h"
27 #include "i915_trace.h"
33 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
36 #define __EXEC_OBJECT_HAS_REF BIT(31)
37 #define __EXEC_OBJECT_HAS_PIN BIT(30)
38 #define __EXEC_OBJECT_HAS_FENCE BIT(29)
39 #define __EXEC_OBJECT_NEEDS_MAP BIT(28)
40 #define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
41 #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
42 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
44 #define __EXEC_HAS_RELOC BIT(31)
45 #define __EXEC_VALIDATED BIT(30)
46 #define __EXEC_INTERNAL_FLAGS (~0u << 30)
47 #define UPDATE PIN_OFFSET_FIXED
49 #define BATCH_OFFSET_BIAS (256*1024)
51 #define __I915_EXEC_ILLEGAL_FLAGS \
52 (__I915_EXEC_UNKNOWN_FLAGS | \
53 I915_EXEC_CONSTANTS_MASK | \
54 I915_EXEC_RESOURCE_STREAMER)
56 /* Catch emission of unexpected errors for CI! */
57 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
60 DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
66 * DOC: User command execution
68 * Userspace submits commands to be executed on the GPU as an instruction
69 * stream within a GEM object we call a batchbuffer. These instructions may
70 * refer to other GEM objects containing auxiliary state such as kernels,
71 * samplers, render targets and even secondary batchbuffers. Userspace does
72 * not know where in the GPU memory these objects reside and so before the
73 * batchbuffer is passed to the GPU for execution, those addresses in the
74 * batchbuffer and auxiliary objects are updated. This is known as relocation,
75 * or patching. To try and avoid having to relocate each object on the next
76 * execution, userspace is told the location of those objects in this pass,
77 * but this remains just a hint as the kernel may choose a new location for
78 * any object in the future.
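 *
 * As a rough illustration of the uapi view (a sketch only; error handling and
 * the remaining execbuffer2 fields are omitted, and target_bo_handle /
 * last_known_gpu_addr stand for values the caller already tracks), each
 * relocation entry names a target object and where its address must be written:
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = target_bo_handle,	/* GEM handle of the target object */
 *		.delta           = 0,			/* byte offset added to the target's GPU address */
 *		.offset          = 0x40,		/* where in this object the address is written */
 *		.presumed_offset = last_known_gpu_addr,	/* hint; the rewrite is skipped if still correct */
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,
 *	};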
80 * At the level of talking to the hardware, submitting a batchbuffer for the
81 * GPU to execute is to add content to a buffer from which the HW
82 * command streamer is reading.
84 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
85 * Execlists, this command is not placed on the same buffer as the
88 * 2. Add a command to invalidate caches to the buffer.
90 * 3. Add a batchbuffer start command to the buffer; the start command is
91 * essentially a token together with the GPU address of the batchbuffer
94 * 4. Add a pipeline flush to the buffer.
96 * 5. Add a memory write command to the buffer to record when the GPU
97 * is done executing the batchbuffer. The memory write records the
98 * global sequence number of the request, ``i915_request::global_seqno``;
99 * the i915 driver uses the current value in the register to determine
100 * if the GPU has completed the batchbuffer.
102 * 6. Add a user interrupt command to the buffer. This command instructs
103 * the GPU to issue an interrupt when the command, pipeline flush and
104 * memory write are completed.
106 * 7. Inform the hardware of the additional commands added to the buffer
107 * (by updating the tail pointer).
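 *
 * For a feel of what that amounts to on the ring (a loose sketch only; the
 * exact commands differ per engine and generation), the emitted stream is
 * roughly:
 *
 *	PIPE_CONTROL / MI_FLUSH_DW	(invalidate caches)
 *	MI_BATCH_BUFFER_START <GPU address of the batchbuffer>
 *	PIPE_CONTROL / MI_FLUSH_DW	(flush the pipeline)
 *	MI_STORE_DWORD_IMM <status page address> <seqno>
 *	MI_USER_INTERRUPT
 *
 * followed by a write to the ring's tail register.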
109 * Processing an execbuf ioctl is conceptually split up into a few phases.
111 * 1. Validation - Ensure all the pointers, handles and flags are valid.
112 * 2. Reservation - Assign GPU address space for every object
113 * 3. Relocation - Update any addresses to point to the final locations
114 * 4. Serialisation - Order the request with respect to its dependencies
115 * 5. Construction - Construct a request to execute the batchbuffer
116 * 6. Submission (at some point in the future execution)
118 * Reserving resources for the execbuf is the most complicated phase. We
119 * neither want to have to migrate the object in the address space, nor do
120 * we want to have to update any relocations pointing to this object. Ideally,
121 * we want to leave the object where it is and for all the existing relocations
122 * to match. If the object is given a new address, or if userspace thinks the
123 * object is elsewhere, we have to parse all the relocation entries and update
124 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
125 * all the target addresses in all of its objects match the value in the
126 * relocation entries and that they all match the presumed offsets given by the
127 * list of execbuffer objects. Using this knowledge, we know that if we haven't
128 * moved any buffers, all the relocation entries are valid and we can skip
129 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
130 * hang.) The requirements for using I915_EXEC_NO_RELOC are (see the sketch after this list):
132 * The addresses written in the objects must match the corresponding
133 * reloc.presumed_offset which in turn must match the corresponding
136 * Any render targets written to in the batch must be flagged with
139 * To avoid stalling, execobject.offset should match the current
140 * address of that object within the active context.
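 *
 * A minimal sketch of that contract from the userspace side (field values are
 * illustrative; cached_offset stands for whatever address userspace recorded
 * from a previous execbuf):
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle           = bo_handle,
 *		.offset           = cached_offset,	/* must equal reloc.presumed_offset */
 *		.relocation_count = nrelocs,
 *		.relocs_ptr       = (uintptr_t)relocs,
 *		.flags            = is_render_target ? EXEC_OBJECT_WRITE : 0,
 *	};
 *	execbuf.flags |= I915_EXEC_NO_RELOC;
 *
 * If any presumed offset turns out to be stale, the kernel simply falls back
 * to processing the relocation entries as usual.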
142 * The reservation is done in multiple phases. First we try and keep any
143 * object already bound in its current location - so long as it meets the
144 * constraints imposed by the new execbuffer. Any object left unbound after the
145 * first pass is then fitted into any available idle space. If an object does
146 * not fit, all objects are removed from the reservation and the process rerun
147 * after sorting the objects into a priority order (more difficult to fit
148 * objects are tried first). Failing that, the entire VM is cleared and we try
149 * to fit the execbuf one last time before concluding that it simply will not
152 * A small complication to all of this is that we allow userspace not only to
153 * specify an alignment and a size for the object in the address space, but
154 * we also allow userspace to specify the exact offset. These objects are
155 * simpler to place (the location is known a priori); all we have to do is make
156 * sure the space is available.
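 *
 * For completeness, a sketch of such a "soft-pinned" object (the offset here
 * is made up; userspace is responsible for managing its own GPU address space
 * when it uses this mode):
 *
 *	obj.offset = 0x100000;				/* exact GPU address requested */
 *	obj.flags  = EXEC_OBJECT_PINNED |
 *		     EXEC_OBJECT_SUPPORTS_48B_ADDRESS;	/* allow placement above 4GiB */
 *
 * The kernel then only has to check that the requested range is, or can be
 * made, available.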
158 * Once all the objects are in place, patching up the buried pointers to point
159 * to the final locations is a fairly simple job of walking over the relocation
160 * entry arrays, looking up the right address and rewriting the value into
161 * the object. Simple! ... The relocation entries are stored in user memory
162 * and so to access them we have to copy them into a local buffer. That copy
163 * has to avoid taking any pagefaults as they may lead back to a GEM object
164 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
165 * the relocation into multiple passes. First we try to do everything within an
166 * atomic context (avoid the pagefaults) which requires that we never wait. If
167 * we detect that we may wait, or if we need to fault, then we have to fallback
168 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
169 * bells yet?) Dropping the mutex means that we lose all the state we have
170 * built up so far for the execbuf and we must reset any global data. However,
171 * we do leave the objects pinned in their final locations - which is a
172 * potential issue for concurrent execbufs. Once we have left the mutex, we can
173 * allocate and copy all the relocation entries into a large array at our
174 * leisure, reacquire the mutex, reclaim all the objects and other state and
175 * then proceed to update any incorrect addresses with the objects.
177 * As we process the relocation entries, we maintain a record of whether the
178 * object is being written to. Using NO_RELOC, we expect userspace to provide
179 * this information instead. We also check whether we can skip the relocation
180 * by comparing the expected value inside the relocation entry with the target's
181 * final address. If they differ, we have to map the current object and rewrite
182 * the 4 or 8 byte pointer within.
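 *
 * Concretely, the value written back is simply the target's address plus the
 * relocation delta (a sketch of the arithmetic only, ignoring the GPU
 * round-trip used when the object is busy):
 *
 *	value = gen8_canonical_addr(target->node.start + reloc->delta);
 *	*(u32 *)(vaddr + offset_in_page(reloc->offset)) = lower_32_bits(value);
 *	if (wide)	/* gen8+ uses 64b addresses, so also write the high dword */
 *		*(u32 *)(vaddr + offset_in_page(reloc->offset + 4)) = upper_32_bits(value);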
184 * Serialising an execbuf is quite simple according to the rules of the GEM
185 * ABI. Execution within each context is ordered by the order of submission.
186 * Writes to any GEM object are in order of submission and are exclusive. Reads
187 * from a GEM object are unordered with respect to other reads, but ordered by
188 * writes. A write submitted after a read cannot occur before the read, and
189 * similarly any read submitted after a write cannot occur before the write.
190 * Writes are ordered between engines such that only one write occurs at any
191 * time (completing any reads beforehand) - using semaphores where available
192 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
193 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
194 * reads before starting, and any read (either using set-domain or pread) must
195 * flush all GPU writes before starting. (Note we only employ a barrier before,
196 * we currently rely on userspace not concurrently starting a new execution
197 * whilst reading or writing to an object. This may be an advantage or not
198 * depending on how much you trust userspace not to shoot themselves in the
199 * foot.) Serialisation may just result in the request being inserted into
200 * a DAG awaiting its turn, but the simplest is to wait on the CPU until
201 * all dependencies are resolved.
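 *
 * In terms of the execbuf interface this boils down to the per-object flags
 * (a simplified view; requests may also carry explicit fences):
 *
 *	obj.flags |= EXEC_OBJECT_WRITE;	/* exclusive access, ordered against all readers */
 *	obj.flags |= EXEC_OBJECT_ASYNC;	/* opt out of the implicit serialisation above */
 *
 * leaving the DAG of requests to resolve the actual execution order.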
203 * After all of that, it is just a matter of closing the request and handing it to
204 * the hardware (well, leaving it in a queue to be executed). However, we also
205 * offer the ability for batchbuffers to be run with elevated privileges so
206 * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
207 * Before any batch is given extra privileges we first must check that it
208 * contains no nefarious instructions: we check that each instruction is from
209 * our whitelist and all registers are also from an allowed list. We first
210 * copy the user's batchbuffer to a shadow (so that the user doesn't have
211 * access to it, either by the CPU or GPU as we scan it) and then parse each
212 * instruction. If everything is ok, we set a flag telling the hardware to run
213 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
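 *
 * From the uapi side, a privileged batch is requested with (a sketch; the
 * caller must be the DRM master and hold CAP_SYS_ADMIN, see
 * i915_gem_do_execbuffer() below):
 *
 *	execbuf.flags |= I915_EXEC_SECURE;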
216 struct i915_execbuffer {
217 struct drm_i915_private *i915; /** i915 backpointer */
218 struct drm_file *file; /** per-file lookup tables and limits */
219 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
220 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
221 struct i915_vma **vma;
224 struct intel_engine_cs *engine; /** engine to queue the request to */
225 struct intel_context *context; /* logical state for the request */
226 struct i915_gem_context *gem_context; /** caller's context */
228 struct i915_request *request; /** our request to build */
229 struct i915_vma *batch; /** identity of the batch obj/vma */
231 /** actual size of execobj[] as we may extend it for the cmdparser */
232 unsigned int buffer_count;
234 /** list of vma not yet bound during reservation phase */
235 struct list_head unbound;
237 /** list of vma that have execobj.relocation_count */
238 struct list_head relocs;
241 * Track the most recently used object for relocations, as we
242 * frequently have to perform multiple relocations within the same
246 struct drm_mm_node node; /** temporary GTT binding */
247 unsigned long vaddr; /** Current kmap address */
248 unsigned long page; /** Currently mapped page index */
249 unsigned int gen; /** Cached value of INTEL_GEN */
250 bool use_64bit_reloc : 1;
253 bool needs_unfenced : 1;
255 struct i915_request *rq;
257 unsigned int rq_size;
260 u64 invalid_flags; /** Set of execobj.flags that are invalid */
261 u32 context_flags; /** Set of execobj.flags to insert from the ctx */
263 u32 batch_start_offset; /** Location within object of batch */
264 u32 batch_len; /** Length of batch within object */
265 u32 batch_flags; /** Flags composed for emit_bb_start() */
268 * Indicate either the size of the hashtable used to resolve
269 * relocation handles, or if negative that we are using a direct
270 * index into the execobj[].
273 struct hlist_head *buckets; /** ht for relocation handles */
276 #define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
279 * Used to convert any address to canonical form.
280 * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
281 * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
282 * addresses to be in a canonical form:
283 * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
284 * canonical form [63:48] == [47]."
286 #define GEN8_HIGH_ADDRESS_BIT 47
287 static inline u64 gen8_canonical_addr(u64 address)
289 return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
292 static inline u64 gen8_noncanonical_addr(u64 address)
294 return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
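
/*
 * A quick worked example of the canonical form (illustrative values only):
 * an address of 0x0000800000000000 has bit 47 set, so its canonical form
 * sign-extends that bit upwards, giving 0xffff800000000000.
 * gen8_noncanonical_addr() simply masks bits 63:48 off again before the
 * address is handed to drm_mm.
 */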
297 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
299 return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
302 static int eb_create(struct i915_execbuffer *eb)
304 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
305 unsigned int size = 1 + ilog2(eb->buffer_count);
308 * Without a 1:1 association between relocation handles and
309 * the execobject[] index, we instead create a hashtable.
310 * We size it dynamically based on available memory, starting
311 * first with a 1:1 associative hash and scaling back until
312 * the allocation succeeds.
314 * Later on we use a positive lut_size to indicate we are
315 * using this hashtable, and a negative value to indicate a
321 /* While we can still reduce the allocation size, don't
322 * raise a warning; just allow the allocation to fail.
323 * On the last pass, though, we want to try as hard
324 * as possible to perform the allocation and warn
329 flags |= __GFP_NORETRY | __GFP_NOWARN;
331 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
342 eb->lut_size = -eb->buffer_count;
349 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
350 const struct i915_vma *vma,
353 if (vma->node.size < entry->pad_to_size)
356 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
359 if (flags & EXEC_OBJECT_PINNED &&
360 vma->node.start != entry->offset)
363 if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
364 vma->node.start < BATCH_OFFSET_BIAS)
367 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
368 (vma->node.start + vma->node.size - 1) >> 32)
371 if (flags & __EXEC_OBJECT_NEEDS_MAP &&
372 !i915_vma_is_map_and_fenceable(vma))
379 eb_pin_vma(struct i915_execbuffer *eb,
380 const struct drm_i915_gem_exec_object2 *entry,
381 struct i915_vma *vma)
383 unsigned int exec_flags = *vma->exec_flags;
387 pin_flags = vma->node.start;
389 pin_flags = entry->offset & PIN_OFFSET_MASK;
391 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
392 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
393 pin_flags |= PIN_GLOBAL;
395 if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
398 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
399 if (unlikely(i915_vma_pin_fence(vma))) {
405 exec_flags |= __EXEC_OBJECT_HAS_FENCE;
408 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
409 return !eb_vma_misplaced(entry, vma, exec_flags);
412 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
414 GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
416 if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
417 __i915_vma_unpin_fence(vma);
419 __i915_vma_unpin(vma);
423 eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
425 if (!(*flags & __EXEC_OBJECT_HAS_PIN))
428 __eb_unreserve_vma(vma, *flags);
429 *flags &= ~__EXEC_OBJECT_RESERVED;
433 eb_validate_vma(struct i915_execbuffer *eb,
434 struct drm_i915_gem_exec_object2 *entry,
435 struct i915_vma *vma)
437 if (unlikely(entry->flags & eb->invalid_flags))
440 if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
444 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
445 * any non-page-aligned or non-canonical addresses.
447 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
448 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
451 /* pad_to_size was once a reserved field, so sanitize it */
452 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
453 if (unlikely(offset_in_page(entry->pad_to_size)))
456 entry->pad_to_size = 0;
459 if (unlikely(vma->exec_flags)) {
460 DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
461 entry->handle, (int)(entry - eb->exec));
466 * From the drm_mm perspective, the address space is continuous,
467 * so from this point we're always using non-canonical
470 entry->offset = gen8_noncanonical_addr(entry->offset);
472 if (!eb->reloc_cache.has_fence) {
473 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
475 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
476 eb->reloc_cache.needs_unfenced) &&
477 i915_gem_object_is_tiled(vma->obj))
478 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
481 if (!(entry->flags & EXEC_OBJECT_PINNED))
482 entry->flags |= eb->context_flags;
488 eb_add_vma(struct i915_execbuffer *eb,
489 unsigned int i, unsigned batch_idx,
490 struct i915_vma *vma)
492 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
495 GEM_BUG_ON(i915_vma_is_closed(vma));
497 if (!(eb->args->flags & __EXEC_VALIDATED)) {
498 err = eb_validate_vma(eb, entry, vma);
503 if (eb->lut_size > 0) {
504 vma->exec_handle = entry->handle;
505 hlist_add_head(&vma->exec_node,
506 &eb->buckets[hash_32(entry->handle,
510 if (entry->relocation_count)
511 list_add_tail(&vma->reloc_link, &eb->relocs);
514 * Stash a pointer from the vma to execobj, so we can query its flags,
515 * size, alignment etc as provided by the user. Also we stash a pointer
516 * to the vma inside the execobj so that we can use a direct lookup
517 * to find the right target VMA when doing relocations.
520 eb->flags[i] = entry->flags;
521 vma->exec_flags = &eb->flags[i];
524 * SNA is doing fancy tricks with compressing batch buffers, which leads
525 * to negative relocation deltas. Usually that works out ok since the
526 * relocated address is still positive, except when the batch is placed
527 * very low in the GTT. Ensure this doesn't happen.
529 * Note that actual hangs have only been observed on gen7, but for
530 * paranoia do it everywhere.
532 if (i == batch_idx) {
533 if (entry->relocation_count &&
534 !(eb->flags[i] & EXEC_OBJECT_PINNED))
535 eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
536 if (eb->reloc_cache.has_fence)
537 eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
543 if (eb_pin_vma(eb, entry, vma)) {
544 if (entry->offset != vma->node.start) {
545 entry->offset = vma->node.start | UPDATE;
546 eb->args->flags |= __EXEC_HAS_RELOC;
549 eb_unreserve_vma(vma, vma->exec_flags);
551 list_add_tail(&vma->exec_link, &eb->unbound);
552 if (drm_mm_node_allocated(&vma->node))
553 err = i915_vma_unbind(vma);
555 vma->exec_flags = NULL;
560 static inline int use_cpu_reloc(const struct reloc_cache *cache,
561 const struct drm_i915_gem_object *obj)
563 if (!i915_gem_object_has_struct_page(obj))
566 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
569 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
572 return (cache->has_llc ||
574 obj->cache_level != I915_CACHE_NONE);
577 static int eb_reserve_vma(const struct i915_execbuffer *eb,
578 struct i915_vma *vma)
580 struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
581 unsigned int exec_flags = *vma->exec_flags;
585 pin_flags = PIN_USER | PIN_NONBLOCK;
586 if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
587 pin_flags |= PIN_GLOBAL;
590 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
591 * limit address to the first 4GBs for unflagged objects.
593 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
594 pin_flags |= PIN_ZONE_4G;
596 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
597 pin_flags |= PIN_MAPPABLE;
599 if (exec_flags & EXEC_OBJECT_PINNED) {
600 pin_flags |= entry->offset | PIN_OFFSET_FIXED;
601 pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
602 } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
603 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
606 err = i915_vma_pin(vma,
607 entry->pad_to_size, entry->alignment,
612 if (entry->offset != vma->node.start) {
613 entry->offset = vma->node.start | UPDATE;
614 eb->args->flags |= __EXEC_HAS_RELOC;
617 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
618 err = i915_vma_pin_fence(vma);
625 exec_flags |= __EXEC_OBJECT_HAS_FENCE;
628 *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
629 GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
634 static int eb_reserve(struct i915_execbuffer *eb)
636 const unsigned int count = eb->buffer_count;
637 struct list_head last;
638 struct i915_vma *vma;
639 unsigned int i, pass;
643 * Attempt to pin all of the buffers into the GTT.
644 * This is done in 3 phases:
646 * 1a. Unbind all objects that do not match the GTT constraints for
647 * the execbuffer (fenceable, mappable, alignment etc).
648 * 1b. Increment pin count for already bound objects.
649 * 2. Bind new objects.
650 * 3. Decrement pin count.
652 * This avoids unnecessary unbinding of later objects in order to make
653 * room for the earlier objects *unless* we need to defragment.
659 list_for_each_entry(vma, &eb->unbound, exec_link) {
660 err = eb_reserve_vma(eb, vma);
667 /* Resort *all* the objects into priority order */
668 INIT_LIST_HEAD(&eb->unbound);
669 INIT_LIST_HEAD(&last);
670 for (i = 0; i < count; i++) {
671 unsigned int flags = eb->flags[i];
672 struct i915_vma *vma = eb->vma[i];
674 if (flags & EXEC_OBJECT_PINNED &&
675 flags & __EXEC_OBJECT_HAS_PIN)
678 eb_unreserve_vma(vma, &eb->flags[i]);
680 if (flags & EXEC_OBJECT_PINNED)
681 /* Pinned objects must have their exact slot */
682 list_add(&vma->exec_link, &eb->unbound);
683 else if (flags & __EXEC_OBJECT_NEEDS_MAP)
684 /* Mappable objects require the lowest 256MiB (aperture) */
685 list_add_tail(&vma->exec_link, &eb->unbound);
686 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
687 /* Prioritise 4GiB region for restricted bo */
688 list_add(&vma->exec_link, &last);
690 list_add_tail(&vma->exec_link, &last);
692 list_splice_tail(&last, &eb->unbound);
699 /* Too fragmented, unbind everything and retry */
700 err = i915_gem_evict_vm(eb->context->vm);
711 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
713 if (eb->args->flags & I915_EXEC_BATCH_FIRST)
716 return eb->buffer_count - 1;
719 static int eb_select_context(struct i915_execbuffer *eb)
721 struct i915_gem_context *ctx;
723 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
727 eb->gem_context = ctx;
729 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
731 eb->context_flags = 0;
732 if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
733 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
738 static int eb_lookup_vmas(struct i915_execbuffer *eb)
740 struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
741 struct drm_i915_gem_object *obj;
742 unsigned int i, batch;
745 if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
748 INIT_LIST_HEAD(&eb->relocs);
749 INIT_LIST_HEAD(&eb->unbound);
751 batch = eb_batch_index(eb);
753 mutex_lock(&eb->gem_context->mutex);
754 if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
759 for (i = 0; i < eb->buffer_count; i++) {
760 u32 handle = eb->exec[i].handle;
761 struct i915_lut_handle *lut;
762 struct i915_vma *vma;
764 vma = radix_tree_lookup(handles_vma, handle);
768 obj = i915_gem_object_lookup(eb->file, handle);
769 if (unlikely(!obj)) {
774 vma = i915_vma_instance(obj, eb->context->vm, NULL);
780 lut = i915_lut_handle_alloc();
781 if (unlikely(!lut)) {
786 err = radix_tree_insert(handles_vma, handle, vma);
788 i915_lut_handle_free(lut);
792 /* transfer ref to lut */
793 if (!atomic_fetch_inc(&vma->open_count))
794 i915_vma_reopen(vma);
795 lut->handle = handle;
796 lut->ctx = eb->gem_context;
798 i915_gem_object_lock(obj);
799 list_add(&lut->obj_link, &obj->lut_list);
800 i915_gem_object_unlock(obj);
803 err = eb_add_vma(eb, i, batch, vma);
807 GEM_BUG_ON(vma != eb->vma[i]);
808 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
809 GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
810 eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
813 mutex_unlock(&eb->gem_context->mutex);
815 eb->args->flags |= __EXEC_VALIDATED;
816 return eb_reserve(eb);
819 i915_gem_object_put(obj);
823 mutex_unlock(&eb->gem_context->mutex);
827 static struct i915_vma *
828 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
830 if (eb->lut_size < 0) {
831 if (handle >= -eb->lut_size)
833 return eb->vma[handle];
835 struct hlist_head *head;
836 struct i915_vma *vma;
838 head = &eb->buckets[hash_32(handle, eb->lut_size)];
839 hlist_for_each_entry(vma, head, exec_node) {
840 if (vma->exec_handle == handle)
847 static void eb_release_vmas(const struct i915_execbuffer *eb)
849 const unsigned int count = eb->buffer_count;
852 for (i = 0; i < count; i++) {
853 struct i915_vma *vma = eb->vma[i];
854 unsigned int flags = eb->flags[i];
859 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
860 vma->exec_flags = NULL;
863 if (flags & __EXEC_OBJECT_HAS_PIN)
864 __eb_unreserve_vma(vma, flags);
866 if (flags & __EXEC_OBJECT_HAS_REF)
871 static void eb_reset_vmas(const struct i915_execbuffer *eb)
874 if (eb->lut_size > 0)
875 memset(eb->buckets, 0,
876 sizeof(struct hlist_head) << eb->lut_size);
879 static void eb_destroy(const struct i915_execbuffer *eb)
881 GEM_BUG_ON(eb->reloc_cache.rq);
883 if (eb->lut_size > 0)
888 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
889 const struct i915_vma *target)
891 return gen8_canonical_addr((int)reloc->delta + target->node.start);
894 static void reloc_cache_init(struct reloc_cache *cache,
895 struct drm_i915_private *i915)
899 /* Must be a variable in the struct to allow GCC to unroll. */
900 cache->gen = INTEL_GEN(i915);
901 cache->has_llc = HAS_LLC(i915);
902 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
903 cache->has_fence = cache->gen < 4;
904 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
905 cache->node.allocated = false;
910 static inline void *unmask_page(unsigned long p)
912 return (void *)(uintptr_t)(p & PAGE_MASK);
915 static inline unsigned int unmask_flags(unsigned long p)
917 return p & ~PAGE_MASK;
920 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
922 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
924 struct drm_i915_private *i915 =
925 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
929 static void reloc_gpu_flush(struct reloc_cache *cache)
931 GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
932 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
934 __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
935 i915_gem_object_unpin_map(cache->rq->batch->obj);
937 intel_gt_chipset_flush(cache->rq->engine->gt);
939 i915_request_add(cache->rq);
943 static void reloc_cache_reset(struct reloc_cache *cache)
948 reloc_gpu_flush(cache);
953 vaddr = unmask_page(cache->vaddr);
954 if (cache->vaddr & KMAP) {
955 if (cache->vaddr & CLFLUSH_AFTER)
958 kunmap_atomic(vaddr);
959 i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
961 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
963 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
964 io_mapping_unmap_atomic((void __iomem *)vaddr);
966 if (cache->node.allocated) {
967 ggtt->vm.clear_range(&ggtt->vm,
970 drm_mm_remove_node(&cache->node);
972 i915_vma_unpin((struct i915_vma *)cache->node.mm);
980 static void *reloc_kmap(struct drm_i915_gem_object *obj,
981 struct reloc_cache *cache,
987 kunmap_atomic(unmask_page(cache->vaddr));
989 unsigned int flushes;
992 err = i915_gem_object_prepare_write(obj, &flushes);
996 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
997 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
999 cache->vaddr = flushes | KMAP;
1000 cache->node.mm = (void *)obj;
1005 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
1006 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
1012 static void *reloc_iomap(struct drm_i915_gem_object *obj,
1013 struct reloc_cache *cache,
1016 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
1017 unsigned long offset;
1021 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
1022 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
1024 struct i915_vma *vma;
1027 if (i915_gem_object_is_tiled(obj))
1028 return ERR_PTR(-EINVAL);
1030 if (use_cpu_reloc(cache, obj))
1033 i915_gem_object_lock(obj);
1034 err = i915_gem_object_set_to_gtt_domain(obj, true);
1035 i915_gem_object_unlock(obj);
1037 return ERR_PTR(err);
1039 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1041 PIN_NONBLOCK /* NOWARN */ |
1044 memset(&cache->node, 0, sizeof(cache->node));
1045 err = drm_mm_insert_node_in_range
1046 (&ggtt->vm.mm, &cache->node,
1047 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1048 0, ggtt->mappable_end,
1050 if (err) /* no inactive aperture space, use cpu reloc */
1053 cache->node.start = vma->node.start;
1054 cache->node.mm = (void *)vma;
1058 offset = cache->node.start;
1059 if (cache->node.allocated) {
1060 ggtt->vm.insert_page(&ggtt->vm,
1061 i915_gem_object_get_dma_address(obj, page),
1062 offset, I915_CACHE_NONE, 0);
1064 offset += page << PAGE_SHIFT;
1067 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1070 cache->vaddr = (unsigned long)vaddr;
1075 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1076 struct reloc_cache *cache,
1081 if (cache->page == page) {
1082 vaddr = unmask_page(cache->vaddr);
1085 if ((cache->vaddr & KMAP) == 0)
1086 vaddr = reloc_iomap(obj, cache, page);
1088 vaddr = reloc_kmap(obj, cache, page);
1094 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1096 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1097 if (flushes & CLFLUSH_BEFORE) {
1105 * Writes to the same cacheline are serialised by the CPU
1106 * (including clflush). On the write path, we only require
1107 * that it hits memory in an orderly fashion and place
1108 * mb barriers at the start and end of the relocation phase
1109 * to ensure ordering of clflush wrt to the system.
1111 if (flushes & CLFLUSH_AFTER)
1117 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1119 struct drm_i915_gem_object *obj = vma->obj;
1124 if (obj->cache_dirty & ~obj->cache_coherent)
1125 i915_gem_clflush_object(obj, 0);
1126 obj->write_domain = 0;
1128 err = i915_request_await_object(rq, vma->obj, true);
1130 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1132 i915_vma_unlock(vma);
1137 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1138 struct i915_vma *vma,
1141 struct reloc_cache *cache = &eb->reloc_cache;
1142 struct intel_engine_pool_node *pool;
1143 struct i915_request *rq;
1144 struct i915_vma *batch;
1148 pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
1150 return PTR_ERR(pool);
1152 cmd = i915_gem_object_pin_map(pool->obj,
1161 batch = i915_vma_instance(pool->obj, vma->vm, NULL);
1162 if (IS_ERR(batch)) {
1163 err = PTR_ERR(batch);
1167 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1171 rq = i915_request_create(eb->context);
1177 err = intel_engine_pool_mark_active(pool, rq);
1181 err = reloc_move_to_gpu(rq, vma);
1185 err = eb->engine->emit_bb_start(rq,
1186 batch->node.start, PAGE_SIZE,
1187 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1191 i915_vma_lock(batch);
1192 err = i915_request_await_object(rq, batch->obj, false);
1194 err = i915_vma_move_to_active(batch, rq, 0);
1195 i915_vma_unlock(batch);
1200 i915_vma_unpin(batch);
1203 cache->rq_cmd = cmd;
1206 /* Return with batch mapping (cmd) still pinned */
1210 i915_request_skip(rq, err);
1212 i915_request_add(rq);
1214 i915_vma_unpin(batch);
1216 i915_gem_object_unpin_map(pool->obj);
1218 intel_engine_pool_put(pool);
1222 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1223 struct i915_vma *vma,
1226 struct reloc_cache *cache = &eb->reloc_cache;
1229 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1230 reloc_gpu_flush(cache);
1232 if (unlikely(!cache->rq)) {
1235 /* If we need to copy for the cmdparser, we will stall anyway */
1236 if (eb_use_cmdparser(eb))
1237 return ERR_PTR(-EWOULDBLOCK);
1239 if (!intel_engine_can_store_dword(eb->engine))
1240 return ERR_PTR(-ENODEV);
1242 err = __reloc_gpu_alloc(eb, vma, len);
1244 return ERR_PTR(err);
1247 cmd = cache->rq_cmd + cache->rq_size;
1248 cache->rq_size += len;
1254 relocate_entry(struct i915_vma *vma,
1255 const struct drm_i915_gem_relocation_entry *reloc,
1256 struct i915_execbuffer *eb,
1257 const struct i915_vma *target)
1259 u64 offset = reloc->offset;
1260 u64 target_offset = relocation_target(reloc, target);
1261 bool wide = eb->reloc_cache.use_64bit_reloc;
1264 if (!eb->reloc_cache.vaddr &&
1265 (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1266 !dma_resv_test_signaled_rcu(vma->resv, true))) {
1267 const unsigned int gen = eb->reloc_cache.gen;
1273 len = offset & 7 ? 8 : 5;
1279 batch = reloc_gpu(eb, vma, len);
1283 addr = gen8_canonical_addr(vma->node.start + offset);
1286 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1287 *batch++ = lower_32_bits(addr);
1288 *batch++ = upper_32_bits(addr);
1289 *batch++ = lower_32_bits(target_offset);
1291 addr = gen8_canonical_addr(addr + 4);
1293 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1294 *batch++ = lower_32_bits(addr);
1295 *batch++ = upper_32_bits(addr);
1296 *batch++ = upper_32_bits(target_offset);
1298 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1299 *batch++ = lower_32_bits(addr);
1300 *batch++ = upper_32_bits(addr);
1301 *batch++ = lower_32_bits(target_offset);
1302 *batch++ = upper_32_bits(target_offset);
1304 } else if (gen >= 6) {
1305 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1308 *batch++ = target_offset;
1309 } else if (gen >= 4) {
1310 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1313 *batch++ = target_offset;
1315 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1317 *batch++ = target_offset;
1324 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1326 return PTR_ERR(vaddr);
1328 clflush_write32(vaddr + offset_in_page(offset),
1329 lower_32_bits(target_offset),
1330 eb->reloc_cache.vaddr);
1333 offset += sizeof(u32);
1334 target_offset >>= 32;
1340 return target->node.start | UPDATE;
1344 eb_relocate_entry(struct i915_execbuffer *eb,
1345 struct i915_vma *vma,
1346 const struct drm_i915_gem_relocation_entry *reloc)
1348 struct i915_vma *target;
1351 /* we already hold a reference to all valid objects */
1352 target = eb_get_vma(eb, reloc->target_handle);
1353 if (unlikely(!target))
1356 /* Validate that the target is in a valid r/w GPU domain */
1357 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1358 DRM_DEBUG("reloc with multiple write domains: "
1359 "target %d offset %d "
1360 "read %08x write %08x",
1361 reloc->target_handle,
1362 (int) reloc->offset,
1363 reloc->read_domains,
1364 reloc->write_domain);
1367 if (unlikely((reloc->write_domain | reloc->read_domains)
1368 & ~I915_GEM_GPU_DOMAINS)) {
1369 DRM_DEBUG("reloc with read/write non-GPU domains: "
1370 "target %d offset %d "
1371 "read %08x write %08x",
1372 reloc->target_handle,
1373 (int) reloc->offset,
1374 reloc->read_domains,
1375 reloc->write_domain);
1379 if (reloc->write_domain) {
1380 *target->exec_flags |= EXEC_OBJECT_WRITE;
1383 * Sandybridge PPGTT errata: We need a global gtt mapping
1384 * for MI and pipe_control writes because the gpu doesn't
1385 * properly redirect them through the ppgtt for non_secure
1388 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1389 IS_GEN(eb->i915, 6)) {
1390 err = i915_vma_bind(target, target->obj->cache_level,
1393 "Unexpected failure to bind target VMA!"))
1399 * If the relocation already has the right value in it, no
1400 * more work needs to be done.
1402 if (!DBG_FORCE_RELOC &&
1403 gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
1406 /* Check that the relocation address is valid... */
1407 if (unlikely(reloc->offset >
1408 vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1409 DRM_DEBUG("Relocation beyond object bounds: "
1410 "target %d offset %d size %d.\n",
1411 reloc->target_handle,
1416 if (unlikely(reloc->offset & 3)) {
1417 DRM_DEBUG("Relocation not 4-byte aligned: "
1418 "target %d offset %d.\n",
1419 reloc->target_handle,
1420 (int)reloc->offset);
1425 * If we write into the object, we need to force the synchronisation
1426 * barrier, either with an asynchronous clflush or if we executed the
1427 * patching using the GPU (though that should be serialised by the
1428 * timeline). To be completely sure, and since doing the relocations
1429 * means we are already stalling, disable the user's opt
1430 * out of our synchronisation.
1432 *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
1434 /* and update the user's relocation entry */
1435 return relocate_entry(vma, reloc, eb, target);
1438 static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
1440 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1441 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1442 struct drm_i915_gem_relocation_entry __user *urelocs;
1443 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1444 unsigned int remain;
1446 urelocs = u64_to_user_ptr(entry->relocs_ptr);
1447 remain = entry->relocation_count;
1448 if (unlikely(remain > N_RELOC(ULONG_MAX)))
1452 * We must check that the entire relocation array is safe
1453 * to read. However, if the array is not writable the user loses
1454 * the updated relocation values.
1456 if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1460 struct drm_i915_gem_relocation_entry *r = stack;
1461 unsigned int count =
1462 min_t(unsigned int, remain, ARRAY_SIZE(stack));
1463 unsigned int copied;
1466 * This is the fast path and we cannot handle a pagefault
1467 * whilst holding the struct mutex lest the user pass in the
1468 * relocations contained within an mmapped bo. In such a case
1469 * the page fault handler would call i915_gem_fault() and
1470 * we would try to acquire the struct mutex again. Obviously
1471 * this is bad and so lockdep complains vehemently.
1473 pagefault_disable();
1474 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1476 if (unlikely(copied)) {
1483 u64 offset = eb_relocate_entry(eb, vma, r);
1485 if (likely(offset == 0)) {
1486 } else if ((s64)offset < 0) {
1487 remain = (int)offset;
1491 * Note that reporting an error now
1492 * leaves everything in an inconsistent
1493 * state as we have *already* changed
1494 * the relocation value inside the
1495 * object. As we have not changed the
1496 * reloc.presumed_offset or will not
1497 * change the execobject.offset, on the next
1498 * call we may not rewrite the value
1499 * inside the object, leaving it
1500 * dangling and causing a GPU hang. Unless
1501 * userspace dynamically rebuilds the
1502 * relocations on each execbuf rather than
1503 * presume a static tree.
1505 * We did previously check if the relocations
1506 * were writable (access_ok), an error now
1507 * would be a strange race with mprotect,
1508 * having already demonstrated that we
1509 * can read from this userspace address.
1511 offset = gen8_canonical_addr(offset & ~UPDATE);
1512 if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
1517 } while (r++, --count);
1518 urelocs += ARRAY_SIZE(stack);
1521 reloc_cache_reset(&eb->reloc_cache);
1526 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
1528 const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
1529 struct drm_i915_gem_relocation_entry *relocs =
1530 u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1534 for (i = 0; i < entry->relocation_count; i++) {
1535 u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
1537 if ((s64)offset < 0) {
1544 reloc_cache_reset(&eb->reloc_cache);
1548 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1550 const char __user *addr, *end;
1552 char __maybe_unused c;
1554 size = entry->relocation_count;
1558 if (size > N_RELOC(ULONG_MAX))
1561 addr = u64_to_user_ptr(entry->relocs_ptr);
1562 size *= sizeof(struct drm_i915_gem_relocation_entry);
1563 if (!access_ok(addr, size))
1567 for (; addr < end; addr += PAGE_SIZE) {
1568 int err = __get_user(c, addr);
1572 return __get_user(c, end - 1);
1575 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1577 struct drm_i915_gem_relocation_entry *relocs;
1578 const unsigned int count = eb->buffer_count;
1582 for (i = 0; i < count; i++) {
1583 const unsigned int nreloc = eb->exec[i].relocation_count;
1584 struct drm_i915_gem_relocation_entry __user *urelocs;
1586 unsigned long copied;
1591 err = check_relocations(&eb->exec[i]);
1595 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1596 size = nreloc * sizeof(*relocs);
1598 relocs = kvmalloc_array(size, 1, GFP_KERNEL);
1604 /* copy_from_user is limited to < 4GiB */
1608 min_t(u64, BIT_ULL(31), size - copied);
1610 if (__copy_from_user((char *)relocs + copied,
1611 (char __user *)urelocs + copied,
1616 } while (copied < size);
1619 * As we do not update the known relocation offsets after
1620 * relocating (due to the complexities in lock handling),
1621 * we need to mark them as invalid now so that we force the
1622 * relocation processing next time. Just in case the target
1623 * object is evicted and then rebound into its old
1624 * presumed_offset before the next execbuffer - if that
1625 * happened we would make the mistake of assuming that the
1626 * relocations were valid.
1628 if (!user_access_begin(urelocs, size))
1631 for (copied = 0; copied < nreloc; copied++)
1633 &urelocs[copied].presumed_offset,
1637 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1649 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1650 if (eb->exec[i].relocation_count)
1656 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1658 const unsigned int count = eb->buffer_count;
1661 if (unlikely(i915_modparams.prefault_disable))
1664 for (i = 0; i < count; i++) {
1667 err = check_relocations(&eb->exec[i]);
1675 static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1677 struct drm_device *dev = &eb->i915->drm;
1678 bool have_copy = false;
1679 struct i915_vma *vma;
1683 if (signal_pending(current)) {
1688 /* We may process another execbuffer during the unlock... */
1690 mutex_unlock(&dev->struct_mutex);
1693 * We take 3 passes through the slowpath.
1695 * 1 - we try to just prefault all the user relocation entries and
1696 * then attempt to reuse the atomic pagefault disabled fast path again.
1698 * 2 - we copy the user entries to a local buffer here outside of the
1699 * lock and allow ourselves to wait upon any rendering before
1702 * 3 - we already have a local copy of the relocation entries, but
1703 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
1706 err = eb_prefault_relocations(eb);
1707 } else if (!have_copy) {
1708 err = eb_copy_relocations(eb);
1709 have_copy = err == 0;
1715 mutex_lock(&dev->struct_mutex);
1719 /* A frequent cause of EAGAIN is currently unavailable client pages */
1720 flush_workqueue(eb->i915->mm.userptr_wq);
1722 err = i915_mutex_lock_interruptible(dev);
1724 mutex_lock(&dev->struct_mutex);
1728 /* reacquire the objects */
1729 err = eb_lookup_vmas(eb);
1733 GEM_BUG_ON(!eb->batch);
1735 list_for_each_entry(vma, &eb->relocs, reloc_link) {
1737 pagefault_disable();
1738 err = eb_relocate_vma(eb, vma);
1743 err = eb_relocate_vma_slow(eb, vma);
1750 * Leave the user relocations as they are; this is the painfully slow path,
1751 * and we want to avoid the complication of dropping the lock whilst
1752 * having buffers reserved in the aperture and so causing spurious
1753 * ENOSPC for random operations.
1762 const unsigned int count = eb->buffer_count;
1765 for (i = 0; i < count; i++) {
1766 const struct drm_i915_gem_exec_object2 *entry =
1768 struct drm_i915_gem_relocation_entry *relocs;
1770 if (!entry->relocation_count)
1773 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1781 static int eb_relocate(struct i915_execbuffer *eb)
1783 if (eb_lookup_vmas(eb))
1786 /* The objects are in their final locations, apply the relocations. */
1787 if (eb->args->flags & __EXEC_HAS_RELOC) {
1788 struct i915_vma *vma;
1790 list_for_each_entry(vma, &eb->relocs, reloc_link) {
1791 if (eb_relocate_vma(eb, vma))
1799 return eb_relocate_slow(eb);
1802 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1804 const unsigned int count = eb->buffer_count;
1805 struct ww_acquire_ctx acquire;
1809 ww_acquire_init(&acquire, &reservation_ww_class);
1811 for (i = 0; i < count; i++) {
1812 struct i915_vma *vma = eb->vma[i];
1814 err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
1818 GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
1820 if (err == -EDEADLK) {
1825 ww_mutex_unlock(&eb->vma[j]->resv->lock);
1827 swap(eb->flags[i], eb->flags[j]);
1828 swap(eb->vma[i], eb->vma[j]);
1829 eb->vma[i]->exec_flags = &eb->flags[i];
1831 GEM_BUG_ON(vma != eb->vma[0]);
1832 vma->exec_flags = &eb->flags[0];
1834 err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
1840 ww_acquire_done(&acquire);
1843 unsigned int flags = eb->flags[i];
1844 struct i915_vma *vma = eb->vma[i];
1845 struct drm_i915_gem_object *obj = vma->obj;
1847 assert_vma_held(vma);
1849 if (flags & EXEC_OBJECT_CAPTURE) {
1850 struct i915_capture_list *capture;
1852 capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1854 capture->next = eb->request->capture_list;
1856 eb->request->capture_list = capture;
1861 * If the GPU is not _reading_ through the CPU cache, we need
1862 * to make sure that any writes (both previous GPU writes from
1863 * before a change in snooping levels and normal CPU writes)
1864 * caught in that cache are flushed to main memory.
1867 * We want to say: obj->cache_dirty &&
1868 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1869 * but gcc's optimiser doesn't handle that as well and emits
1870 * two jumps instead of one. Maybe one day...
1872 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1873 if (i915_gem_clflush_object(obj, 0))
1874 flags &= ~EXEC_OBJECT_ASYNC;
1877 if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
1878 err = i915_request_await_object
1879 (eb->request, obj, flags & EXEC_OBJECT_WRITE);
1883 err = i915_vma_move_to_active(vma, eb->request, flags);
1885 i915_vma_unlock(vma);
1887 __eb_unreserve_vma(vma, flags);
1888 vma->exec_flags = NULL;
1890 if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
1893 ww_acquire_fini(&acquire);
1900 /* Unconditionally flush any chipset caches (for streaming writes). */
1901 intel_gt_chipset_flush(eb->engine->gt);
1905 i915_request_skip(eb->request, err);
1909 static bool i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1911 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1914 /* Kernel clipping was a DRI1 misfeature */
1915 if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1916 if (exec->num_cliprects || exec->cliprects_ptr)
1920 if (exec->DR4 == 0xffffffff) {
1921 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1924 if (exec->DR1 || exec->DR4)
1927 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1933 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1938 if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
1939 DRM_DEBUG("sol reset is gen7/rcs only\n");
1943 cs = intel_ring_begin(rq, 4 * 2 + 2);
1947 *cs++ = MI_LOAD_REGISTER_IMM(4);
1948 for (i = 0; i < 4; i++) {
1949 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1953 intel_ring_advance(rq, cs);
1958 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
1960 struct intel_engine_pool_node *pool;
1961 struct i915_vma *vma;
1964 pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
1966 return ERR_CAST(pool);
1968 err = intel_engine_cmd_parser(eb->engine,
1971 eb->batch_start_offset,
1975 if (err == -EACCES) /* unhandled chained batch */
1982 vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
1986 eb->vma[eb->buffer_count] = i915_vma_get(vma);
1987 eb->flags[eb->buffer_count] =
1988 __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
1989 vma->exec_flags = &eb->flags[eb->buffer_count];
1992 vma->private = pool;
1996 intel_engine_pool_put(pool);
2001 add_to_client(struct i915_request *rq, struct drm_file *file)
2003 struct drm_i915_file_private *file_priv = file->driver_priv;
2005 rq->file_priv = file_priv;
2007 spin_lock(&file_priv->mm.lock);
2008 list_add_tail(&rq->client_link, &file_priv->mm.request_list);
2009 spin_unlock(&file_priv->mm.lock);
2012 static int eb_submit(struct i915_execbuffer *eb)
2016 err = eb_move_to_gpu(eb);
2020 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2021 err = i915_reset_gen7_sol_offsets(eb->request);
2027 * After we completed waiting for other engines (using HW semaphores)
2028 * then we can signal that this request/batch is ready to run. This
2029 * allows us to determine if the batch is still waiting on the GPU
2030 * or actually running by checking the breadcrumb.
2032 if (eb->engine->emit_init_breadcrumb) {
2033 err = eb->engine->emit_init_breadcrumb(eb->request);
2038 err = eb->engine->emit_bb_start(eb->request,
2039 eb->batch->node.start +
2040 eb->batch_start_offset,
2049 static int num_vcs_engines(const struct drm_i915_private *i915)
2051 return hweight64(INTEL_INFO(i915)->engine_mask &
2052 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
2056 * Find one BSD ring to dispatch the corresponding BSD command.
2057 * The engine index is returned.
2060 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
2061 struct drm_file *file)
2063 struct drm_i915_file_private *file_priv = file->driver_priv;
2065 /* Check whether the file_priv has already selected one ring. */
2066 if ((int)file_priv->bsd_engine < 0)
2067 file_priv->bsd_engine =
2068 get_random_int() % num_vcs_engines(dev_priv);
2070 return file_priv->bsd_engine;
2073 static const enum intel_engine_id user_ring_map[] = {
2074 [I915_EXEC_DEFAULT] = RCS0,
2075 [I915_EXEC_RENDER] = RCS0,
2076 [I915_EXEC_BLT] = BCS0,
2077 [I915_EXEC_BSD] = VCS0,
2078 [I915_EXEC_VEBOX] = VECS0
2081 static struct i915_request *eb_throttle(struct intel_context *ce)
2083 struct intel_ring *ring = ce->ring;
2084 struct intel_timeline *tl = ce->timeline;
2085 struct i915_request *rq;
2088 * Completely unscientific finger-in-the-air estimates for suitable
2089 * maximum user request size (to avoid blocking) and then backoff.
2091 if (intel_ring_update_space(ring) >= PAGE_SIZE)
2095 * Find a request that, once waited upon, will leave at least half
2096 * the ring available. The hysteresis allows us to compete for the
2097 * shared ring and should mean that we sleep less often prior to
2098 * claiming our resources, but not so long that the ring completely
2099 * drains before we can submit our next request.
2101 list_for_each_entry(rq, &tl->requests, link) {
2102 if (rq->ring != ring)
2105 if (__intel_ring_space(rq->postfix,
2106 ring->emit, ring->size) > ring->size / 2)
2109 if (&rq->link == &tl->requests)
2110 return NULL; /* weird, we will check again later for real */
2112 return i915_request_get(rq);
2116 __eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
2120 if (likely(atomic_inc_not_zero(&ce->pin_count)))
2123 err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
2127 err = __intel_context_do_pin(ce);
2128 mutex_unlock(&eb->i915->drm.struct_mutex);
2134 __eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
2136 if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
2139 mutex_lock(&eb->i915->drm.struct_mutex);
2140 intel_context_unpin(ce);
2141 mutex_unlock(&eb->i915->drm.struct_mutex);
2144 static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
2146 struct intel_timeline *tl;
2147 struct i915_request *rq;
2151 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2152 * EIO if the GPU is already wedged.
2154 err = intel_gt_terminally_wedged(ce->engine->gt);
2159 * Pinning the contexts may generate requests in order to acquire
2160 * GGTT space, so do this first before we reserve a seqno for
2163 err = __eb_pin_context(eb, ce);
2168 * Take a local wakeref for preparing to dispatch the execbuf as
2169 * we expect to access the hardware fairly frequently in the
2170 * process, and require the engine to be kept awake between accesses.
2171 * Upon dispatch, we acquire another prolonged wakeref that we hold
2172 * until the timeline is idle, which in turn releases the wakeref
2173 * taken on the engine, and the parent device.
2175 tl = intel_context_timeline_lock(ce);
2181 intel_context_enter(ce);
2182 rq = eb_throttle(ce);
2184 intel_context_timeline_unlock(tl);
2187 if (i915_request_wait(rq,
2188 I915_WAIT_INTERRUPTIBLE,
2189 MAX_SCHEDULE_TIMEOUT) < 0) {
2190 i915_request_put(rq);
2195 i915_request_put(rq);
2198 eb->engine = ce->engine;
2203 mutex_lock(&tl->mutex);
2204 intel_context_exit(ce);
2205 intel_context_timeline_unlock(tl);
2207 __eb_unpin_context(eb, ce);
2211 static void eb_unpin_engine(struct i915_execbuffer *eb)
2213 struct intel_context *ce = eb->context;
2214 struct intel_timeline *tl = ce->timeline;
2216 mutex_lock(&tl->mutex);
2217 intel_context_exit(ce);
2218 mutex_unlock(&tl->mutex);
2220 __eb_unpin_context(eb, ce);
2224 eb_select_legacy_ring(struct i915_execbuffer *eb,
2225 struct drm_file *file,
2226 struct drm_i915_gem_execbuffer2 *args)
2228 struct drm_i915_private *i915 = eb->i915;
2229 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2231 if (user_ring_id != I915_EXEC_BSD &&
2232 (args->flags & I915_EXEC_BSD_MASK)) {
2233 DRM_DEBUG("execbuf with non bsd ring but with invalid "
2234 "bsd dispatch flags: %d\n", (int)(args->flags));
2238 if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2239 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2241 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2242 bsd_idx = gen8_dispatch_bsd_engine(i915, file);
2243 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2244 bsd_idx <= I915_EXEC_BSD_RING2) {
2245 bsd_idx >>= I915_EXEC_BSD_SHIFT;
2248 DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
2253 return _VCS(bsd_idx);
2256 if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2257 DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
2261 return user_ring_map[user_ring_id];
2265 eb_pin_engine(struct i915_execbuffer *eb,
2266 struct drm_file *file,
2267 struct drm_i915_gem_execbuffer2 *args)
2269 struct intel_context *ce;
2273 if (i915_gem_context_user_engines(eb->gem_context))
2274 idx = args->flags & I915_EXEC_RING_MASK;
2276 idx = eb_select_legacy_ring(eb, file, args);
2278 ce = i915_gem_context_get_engine(eb->gem_context, idx);
2282 err = __eb_pin_engine(eb, ce);
2283 intel_context_put(ce);
2289 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2292 drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2296 static struct drm_syncobj **
2297 get_fence_array(struct drm_i915_gem_execbuffer2 *args,
2298 struct drm_file *file)
2300 const unsigned long nfences = args->num_cliprects;
2301 struct drm_i915_gem_exec_fence __user *user;
2302 struct drm_syncobj **fences;
2306 if (!(args->flags & I915_EXEC_FENCE_ARRAY))
2309 /* Check multiplication overflow for access_ok() and kvmalloc_array() */
2310 BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
2311 if (nfences > min_t(unsigned long,
2312 ULONG_MAX / sizeof(*user),
2313 SIZE_MAX / sizeof(*fences)))
2314 return ERR_PTR(-EINVAL);
2316 user = u64_to_user_ptr(args->cliprects_ptr);
2317 if (!access_ok(user, nfences * sizeof(*user)))
2318 return ERR_PTR(-EFAULT);
2320 fences = kvmalloc_array(nfences, sizeof(*fences),
2321 __GFP_NOWARN | GFP_KERNEL);
2323 return ERR_PTR(-ENOMEM);
2325 for (n = 0; n < nfences; n++) {
2326 struct drm_i915_gem_exec_fence fence;
2327 struct drm_syncobj *syncobj;
2329 if (__copy_from_user(&fence, user++, sizeof(fence))) {
2334 if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
2339 syncobj = drm_syncobj_find(file, fence.handle);
2341 DRM_DEBUG("Invalid syncobj handle provided\n");
2346 BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
2347 ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);
2349 fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2355 __free_fence_array(fences, n);
2356 return ERR_PTR(err);
2360 put_fence_array(struct drm_i915_gem_execbuffer2 *args,
2361 struct drm_syncobj **fences)
2364 __free_fence_array(fences, args->num_cliprects);
static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}
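
/*
 * Once the request has been queued, point every syncobj that userspace
 * marked with I915_EXEC_FENCE_SIGNAL at this request's fence, so the
 * syncobj signals when the batch completes.
 */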
static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
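
/*
 * i915_gem_do_execbuffer() is the common backend for both execbuffer
 * ioctls: validate the flags, resolve the target context and engine,
 * reserve and relocate every object, then build and submit a request to
 * run the batchbuffer, wiring up any in/out fences requested by userspace.
 */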
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = to_i915(dev);
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
	eb.vma[0] = NULL;
	eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}
	if (eb.batch_start_offset > eb.batch->size ||
	    eb.batch_len > eb.batch->size - eb.batch_start_offset) {
		DRM_DEBUG("Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb_use_cmdparser(&eb)) {
		struct i915_vma *vma;

		vma = eb_parse(&eb, drm_is_current_master(file));
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		if (vma) {
			/*
			 * Batch parsed and accepted:
			 *
			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE
			 * bit from MI_BATCH_BUFFER_START commands issued in
			 * the dispatch_execbuffer implementations. We
			 * specifically don't want that set on batches the
			 * command parser has accepted.
			 */
			eb.batch_flags |= I915_DISPATCH_SECURE;
			eb.batch_start_offset = 0;
			eb.batch = vma;
		}
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->size - eb.batch_start_offset;
	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_vma;
		}

		eb.batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}

	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = eb.batch;
	if (eb.batch->private)
		intel_engine_pool_mark_active(eb.batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb);
err_request:
	add_to_client(eb.request, file);
	i915_request_add(eb.request);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(eb.batch);
	if (eb.batch->private)
		intel_engine_pool_put(eb.batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}
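
/*
 * Each userspace exec object carried through do_execbuffer is shadowed by
 * an i915_vma pointer and a flags word in the same allocation, so the
 * buffer-count limits below are computed from the combined per-element
 * footprint.
 */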
static size_t eb_element_size(void)
{
	return (sizeof(struct drm_i915_gem_exec_object2) +
		sizeof(struct i915_vma *) +
		sizeof(unsigned int));
}

static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */
	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	if (!i915_gem_check_execbuffer(&exec2))
		return -EINVAL;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}
	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}
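
/*
 * Modern execbuffer entry point: copy in the exec object array (plus one
 * spare slot for the command parser's shadow batch), look up any fence
 * array, run the batch via i915_gem_do_execbuffer() and write the updated
 * object offsets back to userspace.
 */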
int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		DRM_DEBUG("execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
			  count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		DRM_DEBUG("copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset & PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}