2 * Copyright 2017 Red Hat
3 * Parts ported from amdgpu (fence wait code).
4 * Copyright 2016 Advanced Micro Devices, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * DRM synchronisation objects (syncobj, see struct &drm_syncobj) are
33 * persistent objects that contain an optional fence. The fence can be updated
34 * with a new fence, or be NULL.
36 * syncobj's can be waited upon, where it will wait for the underlying
39 * syncobj's can be exported to fd's and back; these fd's are opaque and
40 * have no other use case, except passing the syncobj between processes.
42 * Their primary use-case is to implement Vulkan fences and semaphores.
44 * syncobj have a kref reference count, but also have an optional file.
45 * The file is only created once the syncobj is exported.
46 * The file takes a reference on the kref.
50 #include <linux/file.h>
52 #include <linux/anon_inodes.h>
53 #include <linux/sync_file.h>
54 #include <linux/sched/signal.h>
56 #include "drm_internal.h"
57 #include <drm/drm_syncobj.h>
59 struct syncobj_wait_entry {
60 struct list_head node;
61 struct task_struct *task;
62 struct dma_fence *fence;
63 struct dma_fence_cb fence_cb;
67 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
68 struct syncobj_wait_entry *wait);
71 * drm_syncobj_find - lookup and reference a sync object.
72 * @file_private: drm file private pointer
73 * @handle: sync object handle to lookup.
75 * Returns a reference to the syncobj pointed to by handle or NULL. The
76 * reference must be released by calling drm_syncobj_put().
78 struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
81 struct drm_syncobj *syncobj;
83 spin_lock(&file_private->syncobj_table_lock);
85 /* Check if we currently have a reference on the object */
86 syncobj = idr_find(&file_private->syncobj_idr, handle);
88 drm_syncobj_get(syncobj);
90 spin_unlock(&file_private->syncobj_table_lock);
94 EXPORT_SYMBOL(drm_syncobj_find);
96 static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
97 struct syncobj_wait_entry *wait)
99 struct dma_fence *fence;
104 spin_lock(&syncobj->lock);
105 /* We've already tried once to get a fence and failed. Now that we
106 * have the lock, try one more time just to be sure we don't add a
107 * callback when a fence has already been set.
109 fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
110 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
111 dma_fence_put(fence);
112 list_add_tail(&wait->node, &syncobj->cb_list);
114 wait->fence = dma_fence_get_stub();
118 spin_unlock(&syncobj->lock);
121 static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
122 struct syncobj_wait_entry *wait)
124 if (!wait->node.next)
127 spin_lock(&syncobj->lock);
128 list_del_init(&wait->node);
129 spin_unlock(&syncobj->lock);
133 * drm_syncobj_add_point - add new timeline point to the syncobj
134 * @syncobj: sync object to add timeline point do
135 * @chain: chain node to use to add the point
136 * @fence: fence to encapsulate in the chain node
137 * @point: sequence number to use for the point
139 * Add the chain node as new timeline point to the syncobj.
141 void drm_syncobj_add_point(struct drm_syncobj *syncobj,
142 struct dma_fence_chain *chain,
143 struct dma_fence *fence,
146 struct syncobj_wait_entry *cur, *tmp;
147 struct dma_fence *prev;
149 dma_fence_get(fence);
151 spin_lock(&syncobj->lock);
153 prev = drm_syncobj_fence_get(syncobj);
154 /* You are adding an unorder point to timeline, which could cause payload returned from query_ioctl is 0! */
155 if (prev && prev->seqno >= point)
156 DRM_ERROR("You are adding an unorder point to timeline!\n");
157 dma_fence_chain_init(chain, prev, fence, point);
158 rcu_assign_pointer(syncobj->fence, &chain->base);
160 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
161 syncobj_wait_syncobj_func(syncobj, cur);
162 spin_unlock(&syncobj->lock);
164 /* Walk the chain once to trigger garbage collection */
165 dma_fence_chain_for_each(fence, prev);
168 EXPORT_SYMBOL(drm_syncobj_add_point);
171 * drm_syncobj_replace_fence - replace fence in a sync object.
172 * @syncobj: Sync object to replace fence in
173 * @fence: fence to install in sync file.
175 * This replaces the fence on a sync object.
177 void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
178 struct dma_fence *fence)
180 struct dma_fence *old_fence;
181 struct syncobj_wait_entry *cur, *tmp;
184 dma_fence_get(fence);
186 spin_lock(&syncobj->lock);
188 old_fence = rcu_dereference_protected(syncobj->fence,
189 lockdep_is_held(&syncobj->lock));
190 rcu_assign_pointer(syncobj->fence, fence);
192 if (fence != old_fence) {
193 list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
194 syncobj_wait_syncobj_func(syncobj, cur);
197 spin_unlock(&syncobj->lock);
199 dma_fence_put(old_fence);
201 EXPORT_SYMBOL(drm_syncobj_replace_fence);
/**
 * drm_syncobj_assign_null_handle - assign a stub fence to the sync object
 * @syncobj: sync object to assign the fence on
 *
 * Assign an already signaled stub fence to the sync object.
 */
static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
	struct dma_fence *fence = dma_fence_get_stub();

	drm_syncobj_replace_fence(syncobj, fence);
	dma_fence_put(fence);
}
218 * drm_syncobj_find_fence - lookup and reference the fence in a sync object
219 * @file_private: drm file private pointer
220 * @handle: sync object handle to lookup.
221 * @point: timeline point
222 * @flags: DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT or not
223 * @fence: out parameter for the fence
225 * This is just a convenience function that combines drm_syncobj_find() and
226 * drm_syncobj_fence_get().
228 * Returns 0 on success or a negative error value on failure. On success @fence
229 * contains a reference to the fence, which must be released by calling
232 int drm_syncobj_find_fence(struct drm_file *file_private,
233 u32 handle, u64 point, u64 flags,
234 struct dma_fence **fence)
236 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
242 *fence = drm_syncobj_fence_get(syncobj);
246 drm_syncobj_put(syncobj);
249 EXPORT_SYMBOL(drm_syncobj_find_fence);
252 * drm_syncobj_free - free a sync object.
253 * @kref: kref to free.
255 * Only to be called from kref_put in drm_syncobj_put.
257 void drm_syncobj_free(struct kref *kref)
259 struct drm_syncobj *syncobj = container_of(kref,
262 drm_syncobj_replace_fence(syncobj, NULL);
265 EXPORT_SYMBOL(drm_syncobj_free);
268 * drm_syncobj_create - create a new syncobj
269 * @out_syncobj: returned syncobj
270 * @flags: DRM_SYNCOBJ_* flags
271 * @fence: if non-NULL, the syncobj will represent this fence
273 * This is the first function to create a sync object. After creating, drivers
274 * probably want to make it available to userspace, either through
275 * drm_syncobj_get_handle() or drm_syncobj_get_fd().
277 * Returns 0 on success or a negative error value on failure.
279 int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
280 struct dma_fence *fence)
282 struct drm_syncobj *syncobj;
284 syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
288 kref_init(&syncobj->refcount);
289 INIT_LIST_HEAD(&syncobj->cb_list);
290 spin_lock_init(&syncobj->lock);
292 if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
293 drm_syncobj_assign_null_handle(syncobj);
296 drm_syncobj_replace_fence(syncobj, fence);
298 *out_syncobj = syncobj;
301 EXPORT_SYMBOL(drm_syncobj_create);
304 * drm_syncobj_get_handle - get a handle from a syncobj
305 * @file_private: drm file private pointer
306 * @syncobj: Sync object to export
307 * @handle: out parameter with the new handle
309 * Exports a sync object created with drm_syncobj_create() as a handle on
310 * @file_private to userspace.
312 * Returns 0 on success or a negative error value on failure.
314 int drm_syncobj_get_handle(struct drm_file *file_private,
315 struct drm_syncobj *syncobj, u32 *handle)
319 /* take a reference to put in the idr */
320 drm_syncobj_get(syncobj);
322 idr_preload(GFP_KERNEL);
323 spin_lock(&file_private->syncobj_table_lock);
324 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
325 spin_unlock(&file_private->syncobj_table_lock);
330 drm_syncobj_put(syncobj);
337 EXPORT_SYMBOL(drm_syncobj_get_handle);
339 static int drm_syncobj_create_as_handle(struct drm_file *file_private,
340 u32 *handle, uint32_t flags)
343 struct drm_syncobj *syncobj;
345 ret = drm_syncobj_create(&syncobj, flags, NULL);
349 ret = drm_syncobj_get_handle(file_private, syncobj, handle);
350 drm_syncobj_put(syncobj);
354 static int drm_syncobj_destroy(struct drm_file *file_private,
357 struct drm_syncobj *syncobj;
359 spin_lock(&file_private->syncobj_table_lock);
360 syncobj = idr_remove(&file_private->syncobj_idr, handle);
361 spin_unlock(&file_private->syncobj_table_lock);
366 drm_syncobj_put(syncobj);
370 static int drm_syncobj_file_release(struct inode *inode, struct file *file)
372 struct drm_syncobj *syncobj = file->private_data;
374 drm_syncobj_put(syncobj);
378 static const struct file_operations drm_syncobj_file_fops = {
379 .release = drm_syncobj_file_release,
383 * drm_syncobj_get_fd - get a file descriptor from a syncobj
384 * @syncobj: Sync object to export
385 * @p_fd: out parameter with the new file descriptor
387 * Exports a sync object created with drm_syncobj_create() as a file descriptor.
389 * Returns 0 on success or a negative error value on failure.
391 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
396 fd = get_unused_fd_flags(O_CLOEXEC);
400 file = anon_inode_getfile("syncobj_file",
401 &drm_syncobj_file_fops,
405 return PTR_ERR(file);
408 drm_syncobj_get(syncobj);
409 fd_install(fd, file);
414 EXPORT_SYMBOL(drm_syncobj_get_fd);
416 static int drm_syncobj_handle_to_fd(struct drm_file *file_private,
417 u32 handle, int *p_fd)
419 struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
425 ret = drm_syncobj_get_fd(syncobj, p_fd);
426 drm_syncobj_put(syncobj);
430 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
433 struct drm_syncobj *syncobj;
441 if (file->f_op != &drm_syncobj_file_fops) {
446 /* take a reference to put in the idr */
447 syncobj = file->private_data;
448 drm_syncobj_get(syncobj);
450 idr_preload(GFP_KERNEL);
451 spin_lock(&file_private->syncobj_table_lock);
452 ret = idr_alloc(&file_private->syncobj_idr, syncobj, 1, 0, GFP_NOWAIT);
453 spin_unlock(&file_private->syncobj_table_lock);
460 drm_syncobj_put(syncobj);
466 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
469 struct dma_fence *fence = sync_file_get_fence(fd);
470 struct drm_syncobj *syncobj;
475 syncobj = drm_syncobj_find(file_private, handle);
477 dma_fence_put(fence);
481 drm_syncobj_replace_fence(syncobj, fence);
482 dma_fence_put(fence);
483 drm_syncobj_put(syncobj);
487 static int drm_syncobj_export_sync_file(struct drm_file *file_private,
488 int handle, int *p_fd)
491 struct dma_fence *fence;
492 struct sync_file *sync_file;
493 int fd = get_unused_fd_flags(O_CLOEXEC);
498 ret = drm_syncobj_find_fence(file_private, handle, 0, 0, &fence);
502 sync_file = sync_file_create(fence);
504 dma_fence_put(fence);
511 fd_install(fd, sync_file->file);
520 * drm_syncobj_open - initalizes syncobj file-private structures at devnode open time
521 * @file_private: drm file-private structure to set up
523 * Called at device open time, sets up the structure for handling refcounting
527 drm_syncobj_open(struct drm_file *file_private)
529 idr_init_base(&file_private->syncobj_idr, 1);
530 spin_lock_init(&file_private->syncobj_table_lock);
/* idr_for_each() callback: drop the idr's reference on one syncobj. */
static int
drm_syncobj_release_handle(int id, void *ptr, void *data)
{
	struct drm_syncobj *syncobj = ptr;

	drm_syncobj_put(syncobj);
	return 0;
}
543 * drm_syncobj_release - release file-private sync object resources
544 * @file_private: drm file-private structure to clean up
546 * Called at close time when the filp is going away.
548 * Releases any remaining references on objects by this filp.
551 drm_syncobj_release(struct drm_file *file_private)
553 idr_for_each(&file_private->syncobj_idr,
554 &drm_syncobj_release_handle, file_private);
555 idr_destroy(&file_private->syncobj_idr);
559 drm_syncobj_create_ioctl(struct drm_device *dev, void *data,
560 struct drm_file *file_private)
562 struct drm_syncobj_create *args = data;
564 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
567 /* no valid flags yet */
568 if (args->flags & ~DRM_SYNCOBJ_CREATE_SIGNALED)
571 return drm_syncobj_create_as_handle(file_private,
572 &args->handle, args->flags);
576 drm_syncobj_destroy_ioctl(struct drm_device *dev, void *data,
577 struct drm_file *file_private)
579 struct drm_syncobj_destroy *args = data;
581 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
584 /* make sure padding is empty */
587 return drm_syncobj_destroy(file_private, args->handle);
591 drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
592 struct drm_file *file_private)
594 struct drm_syncobj_handle *args = data;
596 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
602 if (args->flags != 0 &&
603 args->flags != DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
606 if (args->flags & DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE)
607 return drm_syncobj_export_sync_file(file_private, args->handle,
610 return drm_syncobj_handle_to_fd(file_private, args->handle,
615 drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
616 struct drm_file *file_private)
618 struct drm_syncobj_handle *args = data;
620 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
626 if (args->flags != 0 &&
627 args->flags != DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
630 if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE)
631 return drm_syncobj_import_sync_file_fence(file_private,
635 return drm_syncobj_fd_to_handle(file_private, args->fd,
639 static void syncobj_wait_fence_func(struct dma_fence *fence,
640 struct dma_fence_cb *cb)
642 struct syncobj_wait_entry *wait =
643 container_of(cb, struct syncobj_wait_entry, fence_cb);
645 wake_up_process(wait->task);
648 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
649 struct syncobj_wait_entry *wait)
651 struct dma_fence *fence;
653 /* This happens inside the syncobj lock */
654 fence = rcu_dereference_protected(syncobj->fence,
655 lockdep_is_held(&syncobj->lock));
656 dma_fence_get(fence);
657 if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
658 dma_fence_put(fence);
661 wait->fence = dma_fence_get_stub();
666 wake_up_process(wait->task);
667 list_del_init(&wait->node);
670 static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
671 void __user *user_points,
677 struct syncobj_wait_entry *entries;
678 struct dma_fence *fence;
680 uint32_t signaled_count, i;
682 points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
687 memset(points, 0, count * sizeof(uint64_t));
689 } else if (copy_from_user(points, user_points,
690 sizeof(uint64_t) * count)) {
692 goto err_free_points;
695 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
698 goto err_free_points;
700 /* Walk the list of sync objects and initialize entries. We do
701 * this up-front so that we can properly return -EINVAL if there is
702 * a syncobj with a missing fence and then never have the chance of
703 * returning -EINVAL again.
706 for (i = 0; i < count; ++i) {
707 struct dma_fence *fence;
709 entries[i].task = current;
710 entries[i].point = points[i];
711 fence = drm_syncobj_fence_get(syncobjs[i]);
712 if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
713 dma_fence_put(fence);
714 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
718 goto cleanup_entries;
723 entries[i].fence = fence;
725 entries[i].fence = dma_fence_get_stub();
727 if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
728 dma_fence_is_signaled(entries[i].fence)) {
729 if (signaled_count == 0 && idx)
735 if (signaled_count == count ||
736 (signaled_count > 0 &&
737 !(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL)))
738 goto cleanup_entries;
740 /* There's a very annoying laxness in the dma_fence API here, in
741 * that backends are not required to automatically report when a
742 * fence is signaled prior to fence->ops->enable_signaling() being
743 * called. So here if we fail to match signaled_count, we need to
744 * fallthough and try a 0 timeout wait!
747 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
748 for (i = 0; i < count; ++i)
749 drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
753 set_current_state(TASK_INTERRUPTIBLE);
756 for (i = 0; i < count; ++i) {
757 fence = entries[i].fence;
761 if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
762 dma_fence_is_signaled(fence) ||
763 (!entries[i].fence_cb.func &&
764 dma_fence_add_callback(fence,
765 &entries[i].fence_cb,
766 syncobj_wait_fence_func))) {
767 /* The fence has been signaled */
768 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL) {
778 if (signaled_count == count)
786 if (signal_pending(current)) {
787 timeout = -ERESTARTSYS;
791 timeout = schedule_timeout(timeout);
795 __set_current_state(TASK_RUNNING);
798 for (i = 0; i < count; ++i) {
799 drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
800 if (entries[i].fence_cb.func)
801 dma_fence_remove_callback(entries[i].fence,
802 &entries[i].fence_cb);
803 dma_fence_put(entries[i].fence);
814 * drm_timeout_abs_to_jiffies - calculate jiffies timeout from absolute value
816 * @timeout_nsec: timeout nsec component in ns, 0 for poll
818 * Calculate the timeout in jiffies from an absolute time in sec/nsec.
820 signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
822 ktime_t abs_timeout, now;
823 u64 timeout_ns, timeout_jiffies64;
825 /* make 0 timeout means poll - absolute 0 doesn't seem valid */
826 if (timeout_nsec == 0)
829 abs_timeout = ns_to_ktime(timeout_nsec);
832 if (!ktime_after(abs_timeout, now))
835 timeout_ns = ktime_to_ns(ktime_sub(abs_timeout, now));
837 timeout_jiffies64 = nsecs_to_jiffies64(timeout_ns);
838 /* clamp timeout to avoid infinite timeout */
839 if (timeout_jiffies64 >= MAX_SCHEDULE_TIMEOUT - 1)
840 return MAX_SCHEDULE_TIMEOUT - 1;
842 return timeout_jiffies64 + 1;
844 EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
846 static int drm_syncobj_array_wait(struct drm_device *dev,
847 struct drm_file *file_private,
848 struct drm_syncobj_wait *wait,
849 struct drm_syncobj_timeline_wait *timeline_wait,
850 struct drm_syncobj **syncobjs, bool timeline)
852 signed long timeout = 0;
856 timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
857 timeout = drm_syncobj_array_wait_timeout(syncobjs,
864 wait->first_signaled = first;
866 timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
867 timeout = drm_syncobj_array_wait_timeout(syncobjs,
868 u64_to_user_ptr(timeline_wait->points),
869 timeline_wait->count_handles,
870 timeline_wait->flags,
874 timeline_wait->first_signaled = first;
879 static int drm_syncobj_array_find(struct drm_file *file_private,
880 void __user *user_handles,
881 uint32_t count_handles,
882 struct drm_syncobj ***syncobjs_out)
884 uint32_t i, *handles;
885 struct drm_syncobj **syncobjs;
888 handles = kmalloc_array(count_handles, sizeof(*handles), GFP_KERNEL);
892 if (copy_from_user(handles, user_handles,
893 sizeof(uint32_t) * count_handles)) {
895 goto err_free_handles;
898 syncobjs = kmalloc_array(count_handles, sizeof(*syncobjs), GFP_KERNEL);
899 if (syncobjs == NULL) {
901 goto err_free_handles;
904 for (i = 0; i < count_handles; i++) {
905 syncobjs[i] = drm_syncobj_find(file_private, handles[i]);
908 goto err_put_syncobjs;
913 *syncobjs_out = syncobjs;
918 drm_syncobj_put(syncobjs[i]);
926 static void drm_syncobj_array_free(struct drm_syncobj **syncobjs,
930 for (i = 0; i < count; i++)
931 drm_syncobj_put(syncobjs[i]);
936 drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
937 struct drm_file *file_private)
939 struct drm_syncobj_wait *args = data;
940 struct drm_syncobj **syncobjs;
943 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
946 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
947 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
950 if (args->count_handles == 0)
953 ret = drm_syncobj_array_find(file_private,
954 u64_to_user_ptr(args->handles),
960 ret = drm_syncobj_array_wait(dev, file_private,
961 args, NULL, syncobjs, false);
963 drm_syncobj_array_free(syncobjs, args->count_handles);
969 drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
970 struct drm_file *file_private)
972 struct drm_syncobj_timeline_wait *args = data;
973 struct drm_syncobj **syncobjs;
976 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
979 if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
980 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
981 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
984 if (args->count_handles == 0)
987 ret = drm_syncobj_array_find(file_private,
988 u64_to_user_ptr(args->handles),
994 ret = drm_syncobj_array_wait(dev, file_private,
995 NULL, args, syncobjs, true);
997 drm_syncobj_array_free(syncobjs, args->count_handles);
1004 drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
1005 struct drm_file *file_private)
1007 struct drm_syncobj_array *args = data;
1008 struct drm_syncobj **syncobjs;
1012 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
1018 if (args->count_handles == 0)
1021 ret = drm_syncobj_array_find(file_private,
1022 u64_to_user_ptr(args->handles),
1023 args->count_handles,
1028 for (i = 0; i < args->count_handles; i++)
1029 drm_syncobj_replace_fence(syncobjs[i], NULL);
1031 drm_syncobj_array_free(syncobjs, args->count_handles);
/* IOCTL: signal an array of syncobjs by installing an already-signaled stub
 * fence on each handle (see drm_syncobj_assign_null_handle()).
 *
 * NOTE(review): this paste is missing lines — the `int`/declarations of
 * `i` and `ret`, the return statements for the feature/count checks, the
 * pad check, and the function tail lie outside or were dropped from this
 * chunk; restore them from the upstream file before building.
 */
1037 drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
1038 struct drm_file *file_private)
1040 struct drm_syncobj_array *args = data;
1041 struct drm_syncobj **syncobjs;
/* Syncobjs are optional: reject when the driver lacks DRIVER_SYNCOBJ. */
1045 if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ))
/* An empty handle array is rejected rather than treated as a no-op. */
1051 if (args->count_handles == 0)
/* Resolve every user handle to a referenced syncobj up front. */
1054 ret = drm_syncobj_array_find(file_private,
1055 u64_to_user_ptr(args->handles),
1056 args->count_handles,
/* Install the signaled stub fence on each resolved syncobj. */
1061 for (i = 0; i < args->count_handles; i++)
1062 drm_syncobj_assign_null_handle(syncobjs[i]);
/* Drop the references taken by drm_syncobj_array_find(). */
1064 drm_syncobj_array_free(syncobjs, args->count_handles);