diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f38c61a6d0770a42bf6d575515413..50de138c89e074edfd138451a3ba75d7f1829e02 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
-
 /**
  * drm_gem_init - Initialize the GEM device fields
  * @dev: drm_device structure to initialize
@@ -171,6 +154,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
+       reservation_object_init(&obj->_resv);
+       if (!obj->resv)
+               obj->resv = &obj->_resv;
+
        drm_vma_node_reset(&obj->vma_node);
 }
 EXPORT_SYMBOL(drm_gem_private_object_init);
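
A minimal driver-side sketch of how the new default interacts with a driver
that wants several BOs to share one reservation object (foo_bo_init() is
hypothetical; passing a NULL @shared_resv falls back to the embedded _resv):

static void foo_bo_init(struct drm_device *dev, struct drm_gem_object *obj,
			size_t size, struct reservation_object *shared_resv)
{
	/* If obj->resv is set before the call, drm_gem_private_object_init()
	 * keeps it; otherwise it points obj->resv at the embedded _resv.
	 */
	obj->resv = shared_resv;
	drm_gem_private_object_init(dev, obj, size);
}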
@@ -659,6 +646,85 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
+/* Takes a reference on each GEM object it returns in @objs. On failure,
+ * references already taken are left in @objs for the caller to release.
+ */
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+                         struct drm_gem_object **objs)
+{
+       int i, ret = 0;
+       struct drm_gem_object *obj;
+
+       spin_lock(&filp->table_lock);
+
+       for (i = 0; i < count; i++) {
+               /* Check if we currently have a reference on the object */
+               obj = idr_find(&filp->object_idr, handle[i]);
+               if (!obj) {
+                       ret = -ENOENT;
+                       break;
+               }
+               drm_gem_object_get(obj);
+               objs[i] = obj;
+       }
+       spin_unlock(&filp->table_lock);
+
+       return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs_out filled in with GEM object pointers. The returned GEM objects
+ * need to be released with drm_gem_object_put(). -ENOENT is returned on a
+ * lookup failure; 0 is returned on success.
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+                          int count, struct drm_gem_object ***objs_out)
+{
+       int ret;
+       u32 *handles;
+       struct drm_gem_object **objs;
+
+       if (!count)
+               return 0;
+
+       objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+                            GFP_KERNEL | __GFP_ZERO);
+       if (!objs)
+               return -ENOMEM;
+
+       /* Hand the array to the caller before the fallible steps below, so
+        * it can release any references already taken when we return an
+        * error.
+        */
+       *objs_out = objs;
+
+       handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+       if (!handles) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+               ret = -EFAULT;
+               DRM_DEBUG("Failed to copy in GEM handles\n");
+               goto out;
+       }
+
+       ret = objects_lookup(filp, handles, count, objs);
+out:
+       kvfree(handles);
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
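
A minimal sketch of the expected lookup/cleanup pattern (foo_lookup_and_use()
and its arguments are hypothetical):

static int foo_lookup_and_use(struct drm_file *file, u64 bo_handles,
			      u32 bo_count)
{
	struct drm_gem_object **objs = NULL;
	u32 i;
	int ret;

	ret = drm_gem_objects_lookup(file, u64_to_user_ptr(bo_handles),
				     bo_count, &objs);

	/* ... on success, use objs[0..bo_count - 1] here ... */

	/* The array is zero-filled, so only entries that were actually
	 * looked up hold references; kvfree(NULL) is a no-op.
	 */
	for (i = 0; objs && i < bo_count; i++)
		if (objs[i])
			drm_gem_object_put_unlocked(objs[i]);
	kvfree(objs);
	return ret;
}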
+
 /**
  * drm_gem_object_lookup - look up a GEM object from its handle
  * @filp: DRM file private data
@@ -668,24 +734,56 @@ EXPORT_SYMBOL(drm_gem_put_pages);
  *
  * A reference to the object named by the handle if such exists on @filp, NULL
  * otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
  */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_file *filp, u32 handle)
 {
+       struct drm_gem_object *obj = NULL;
+
+       objects_lookup(filp, &handle, 1, &obj);
+       return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive
+ * fences of a GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * Returns -EINVAL if the handle is invalid, -ERESTARTSYS if interrupted,
+ * -ETIME if the wait timed out, or 0 on success.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+                                   bool wait_all, unsigned long timeout)
+{
+       long ret;
        struct drm_gem_object *obj;
 
-       spin_lock(&filp->table_lock);
+       obj = drm_gem_object_lookup(filep, handle);
+       if (!obj) {
+               DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+               return -EINVAL;
+       }
 
-       /* Check if we currently have a reference on the object */
-       obj = idr_find(&filp->object_idr, handle);
-       if (obj)
-               drm_gem_object_get(obj);
+       ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+                                                 true, timeout);
+       if (ret == 0)
+               ret = -ETIME;
+       else if (ret > 0)
+               ret = 0;
 
-       spin_unlock(&filp->table_lock);
+       drm_gem_object_put_unlocked(obj);
 
-       return obj;
+       return ret;
 }
-EXPORT_SYMBOL(drm_gem_object_lookup);
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
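
A sketch of a driver wait-BO ioctl built on this helper (struct foo_wait_bo
and foo_wait_bo_ioctl() are hypothetical):

static int foo_wait_bo_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct foo_wait_bo *args = data;

	/* Wait on all fences, readers and writer alike. */
	return drm_gem_reservation_object_wait(file_priv, args->handle, true,
					       usecs_to_jiffies(args->timeout_us));
}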
 
 /**
  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -851,6 +949,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
        if (obj->filp)
                fput(obj->filp);
 
+       reservation_object_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1289,174 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
                obj->dev->driver->gem_prime_vunmap(obj, vaddr);
 }
 EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then call
+ * drm_gem_unlock_reservations().
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+                         struct ww_acquire_ctx *acquire_ctx)
+{
+       int contended = -1;
+       int i, ret;
+
+       ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+       if (contended != -1) {
+               struct drm_gem_object *obj = objs[contended];
+
+               ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+                                                      acquire_ctx);
+               if (ret) {
+                       ww_acquire_fini(acquire_ctx);
+                       return ret;
+               }
+       }
+
+       for (i = 0; i < count; i++) {
+               if (i == contended)
+                       continue;
+
+               ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+                                                 acquire_ctx);
+               if (ret) {
+                       int j;
+
+                       for (j = 0; j < i; j++)
+                               ww_mutex_unlock(&objs[j]->resv->lock);
+
+                       if (contended != -1 && contended >= i)
+                               ww_mutex_unlock(&objs[contended]->resv->lock);
+
+                       if (ret == -EDEADLK) {
+                               contended = i;
+                               goto retry;
+                       }
+
+                       ww_acquire_fini(acquire_ctx);
+                       return ret;
+               }
+       }
+
+       ww_acquire_done(acquire_ctx);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+/**
+ * drm_gem_unlock_reservations - Releases the reservation locks taken by
+ * drm_gem_lock_reservations() and cleans up the ww context.
+ * @objs: drm_gem_objects to unlock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: the struct ww_acquire_ctx passed to
+ * drm_gem_lock_reservations()
+ */
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+                           struct ww_acquire_ctx *acquire_ctx)
+{
+       int i;
+
+       for (i = 0; i < count; i++)
+               ww_mutex_unlock(&objs[i]->resv->lock);
+
+       ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
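
A sketch of the intended lock -> reserve -> add-fence -> unlock flow around
job submission (foo_attach_job_fence() is hypothetical; error handling kept
minimal):

static int foo_attach_job_fence(struct drm_gem_object **objs, int count,
				struct dma_fence *done_fence)
{
	struct ww_acquire_ctx ctx;
	int i, ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		/* Make room for one shared fence slot, then publish the
		 * job's done fence on each BO.
		 */
		ret = reservation_object_reserve_shared(objs[i]->resv, 1);
		if (ret)
			break;
		reservation_object_add_shared_fence(objs[i]->resv, done_fence);
	}

	drm_gem_unlock_reservations(objs, count, &ctx);
	return ret;
}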
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * This function consumes the caller's reference on @fence, whether the
+ * fence is stored in the array or dropped.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+                           struct dma_fence *fence)
+{
+       struct dma_fence *entry;
+       unsigned long index;
+       u32 id = 0;
+       int ret;
+
+       if (!fence)
+               return 0;
+
+       /* Deduplicate if we already depend on a fence from the same context.
+        * This lets the size of the array of deps scale with the number of
+        * engines involved, rather than the number of BOs.
+        */
+       xa_for_each(fence_array, index, entry) {
+               if (entry->context != fence->context)
+                       continue;
+
+               if (dma_fence_is_later(fence, entry)) {
+                       dma_fence_put(entry);
+                       xa_store(fence_array, index, fence, GFP_KERNEL);
+               } else {
+                       dma_fence_put(fence);
+               }
+               return 0;
+       }
+
+       ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+       if (ret != 0)
+               dma_fence_put(fence);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
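
A minimal example of feeding an explicit "in" fence from a sync_file fd into a
job's dependency array (foo_add_in_fence() is hypothetical; the xarray must
have been initialized with XA_FLAGS_ALLOC, since xa_alloc() requires it):

static int foo_add_in_fence(struct xarray *deps, int in_fence_fd)
{
	/* sync_file_get_fence() returns a reference, which
	 * drm_gem_fence_array_add() consumes whether it succeeds or fails.
	 */
	struct dma_fence *in_fence = sync_file_get_fence(in_fence_fd);

	if (!in_fence)
		return -EINVAL;

	return drm_gem_fence_array_add(deps, in_fence);
}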
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failure.
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+                                    struct drm_gem_object *obj,
+                                    bool write)
+{
+       int ret;
+       struct dma_fence **fences;
+       unsigned int i, fence_count;
+
+       if (!write) {
+               struct dma_fence *fence =
+                       reservation_object_get_excl_rcu(obj->resv);
+
+               return drm_gem_fence_array_add(fence_array, fence);
+       }
+
+       ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+                                               &fence_count, &fences);
+       if (ret || !fence_count)
+               return ret;
+
+       for (i = 0; i < fence_count; i++) {
+               ret = drm_gem_fence_array_add(fence_array, fences[i]);
+               if (ret) {
+                       /* drm_gem_fence_array_add() already dropped its
+                        * reference on the failing fence, so skip it when
+                        * releasing the remaining ones below.
+                        */
+                       i++;
+                       break;
+               }
+       }
+
+       for (; i < fence_count; i++)
+               dma_fence_put(fences[i]);
+       kfree(fences);
+       return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
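
A sketch gathering the implicit dependencies of every BO in a job, called with
the reservations locked (foo_collect_implicit_deps() is hypothetical):

static int foo_collect_implicit_deps(struct xarray *deps,
				     struct drm_gem_object **objs,
				     int count, bool write)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = drm_gem_fence_array_add_implicit(deps, objs[i], write);
		if (ret)
			return ret;
	}
	return 0;
}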