// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_gpu_trace.h"

/*
 * Cmdstream submission:
 */
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000
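
/*
 * These driver-internal state bits share submit->bos[i].flags with the
 * userspace MSM_SUBMIT_BO_READ/WRITE bits, hence the high-bit values.
 * BO_VALID records that the presumed iova userspace baked into the
 * cmdstream still matches the buffer's actual GPU address; if every bo
 * stays valid, reloc processing can be skipped entirely.
 */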
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, struct msm_gpu_submitqueue *queue,
		uint32_t nr_bos, uint32_t nr_cmds)
{
	struct msm_gem_submit *submit;
	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
		((u64)nr_cmds * sizeof(submit->cmd[0]));

	if (sz > SIZE_MAX)
		return NULL;

	submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->in_rb = false;
	submit->cmd = (void *)&submit->bos[nr_bos];
	submit->queue = queue;
	submit->ring = gpu->rb[queue->prio];

	/* initially, until copy_from_user() and bo lookup succeeds: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}
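
/*
 * The ww_acquire ticket initialized above spans the whole submit:
 * submit_lock_objects() marks it done once all bo's are locked, and
 * submit_cleanup() calls ww_acquire_fini() when the submit is torn down.
 */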
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	dma_fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	msm_submitqueue_put(submit->queue);

	kfree(submit);
}
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
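
/*
 * Lookup runs in two passes: flags are copied and validated with
 * copy_from_user() first (it can fault, so it can't run under a
 * spinlock), then the handle -> object translation happens in one pass
 * under table_lock.  The reference taken by drm_gem_object_get() is
 * dropped in submit_cleanup().
 */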
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
		int i, bool backoff)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->base.resv->lock);

	if (backoff && !(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
/* This is where we make sure all the bo's are reserved and pin'd: */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
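
/*
 * This is the standard wound/wait backoff pattern: on -EDEADLK all locks
 * taken so far are dropped, the contended lock is re-acquired with the
 * _slow variant (sleeping until the winning transaction releases it),
 * and then the whole loop restarts from the beginning.
 */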
static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		if (!write) {
			/* NOTE: _reserve_shared() must happen before
			 * _add_shared_fence(), which makes this a slightly
			 * strange place to call it.  OTOH this is a
			 * convenient can-fail point to hook it in.
			 */
			ret = reservation_object_reserve_shared(msm_obj->base.resv,
								1);
			if (ret)
				return ret;
		}

		if (no_implicit)
			continue;

		ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
			write);
		if (ret)
			break;
	}

	return ret;
}
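
/*
 * Implicit sync orders this submit behind fences already attached to the
 * bo from other fence contexts.  The shared-fence slot is reserved only
 * for read-only buffers, since readers later add a shared fence while
 * writers install the exclusive fence.
 */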
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_and_pin_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);
		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
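
/*
 * After pinning, each bo either kept the iova userspace presumed
 * (BO_VALID stays set) or moved (BO_VALID cleared, submit->valid
 * dropped); a cleared submit->valid is what later forces submit_reloc()
 * to patch the real addresses into the cmdstream.
 */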
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);
	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}
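
/*
 * The shift/or encoding lets one reloc emit either half of a 64-bit
 * address: for example, a reloc with shift = 0 patches a dword with the
 * iova directly, while a companion reloc with shift = -32 patches in the
 * upper 32 bits (iova >> 32), with any constant low bits OR'd in via
 * submit_reloc.or.
 */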
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i, false);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_put(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}
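
/*
 * submit_cleanup() only unwinds per-bo state (locks, pins, submit-list
 * membership, object references); the submit itself, its fence, and the
 * queue reference are released separately by msm_gem_submit_free().
 */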
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	struct sync_file *sync_file = NULL;
	struct msm_gpu_submitqueue *queue;
	struct msm_ringbuffer *ring;
	int out_fence_fd = -1;
	struct pid *pid = get_pid(task_pid(current));
	unsigned i;
	int ret, submitid;

	if (!gpu)
		return -ENXIO;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
		return -EINVAL;

	if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
		return -EINVAL;

	if (args->flags & MSM_SUBMIT_SUDO) {
		if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
		    !capable(CAP_SYS_RAWIO))
			return -EINVAL;
	}

	queue = msm_submitqueue_get(ctx, args->queueid);
	if (!queue)
		return -ENOENT;

	/* Get a unique identifier for the submission for logging purposes */
	submitid = atomic_inc_return(&ident) - 1;

	ring = gpu->rb[queue->prio];
	trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
		args->nr_bos, args->nr_cmds);

	if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, ring->fctx->context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}
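
	/*
	 * An in-fence from our own ring's fence context needs no CPU wait
	 * above: the ring itself already orders it ahead of this submit.
	 */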
	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto out_unlock;
		}
	}

	submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	submit->pid = pid;
	submit->ident = submitid;

	if (args->flags & MSM_SUBMIT_SUDO)
		submit->in_rb = true;

	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint64_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if (!submit_cmd.size ||
			((submit_cmd.size + submit_cmd.submit_offset) >
				msm_obj->base.size)) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx  = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;

	submit->fence = msm_fence_alloc(ring->fctx);
	if (IS_ERR(submit->fence)) {
		ret = PTR_ERR(submit->fence);
		submit->fence = NULL;
		goto out;
	}

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
	}

	msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

	if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
		fd_install(out_fence_fd, sync_file->file);
		args->fence_fd = out_fence_fd;
	}

out:
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
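
/*
 * A minimal sketch of driving this ioctl from userspace, illustrative
 * only: the handle, queue id, and sizes are assumptions, and real users
 * go through libdrm_freedreno or mesa rather than raw ioctls:
 *
 *	struct drm_msm_gem_submit_bo bos[1] = {{
 *		.flags = MSM_SUBMIT_BO_READ, .handle = bo_handle, .presumed = 0,
 *	}};
 *	struct drm_msm_gem_submit_cmd cmds[1] = {{
 *		.type = MSM_SUBMIT_CMD_BUF, .submit_idx = 0,
 *		.submit_offset = 0, .size = cmdstream_size_bytes,
 *	}};
 *	struct drm_msm_gem_submit req = {
 *		.flags = MSM_PIPE_3D0,
 *		.queueid = queue_id,
 *		.bos = (uintptr_t)bos, .nr_bos = 1,
 *		.cmds = (uintptr_t)cmds, .nr_cmds = 1,
 *	};
 *	drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
 */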