asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/v3d: Add missing implicit synchronization.
author: Eric Anholt <eric@anholt.net>
Tue, 16 Apr 2019 22:58:56 +0000 (15:58 -0700)
committer: Eric Anholt <eric@anholt.net>
Thu, 18 Apr 2019 16:54:16 +0000 (09:54 -0700)
It is the expectation of existing userspace (X11 + Mesa, in
particular) that jobs submitted to the kernel against a shared BO will
get implicitly synchronized by their submission order.  If we want to
allow clever userspace to disable implicit synchronization, we should
do that under its own submit flag (as amdgpu and lima do).

Note that we currently only implicitly sync for the rendering pass,
not binning -- if you texture-from-pixmap in the binning vertex shader
(vertex coordinate generation), you'll miss out on synchronization.

Fixes flickering when multiple clients are running in parallel,
particularly GL apps and compositors.

v2: Fix a missing refcount on the CSD done fence for L2 cleaning.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20190416225856.20264-6-eric@anholt.net
Acked-by: Rob Clark <robdclark@gmail.com>
drivers/gpu/drm/v3d/v3d_drv.h
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/v3d/v3d_sched.c

index 3d816e1674a04a2e4789f0271e8bbcebe3ba95f4..47b86a25629eb4d971bcbcf125e92a4a4177d392 100644 (file)
@@ -182,8 +182,10 @@ struct v3d_job {
        struct drm_gem_object **bo;
        u32 bo_count;
 
-       /* An optional fence userspace can pass in for the job to depend on. */
-       struct dma_fence *in_fence;
+       /* Array of struct dma_fence * to block on before submitting this job.
+        */
+       struct xarray deps;
+       unsigned long last_dep;
 
        /* v3d fence to be signaled by IRQ handler when the job is complete. */
        struct dma_fence *irq_fence;
@@ -215,11 +217,6 @@ struct v3d_bin_job {
 struct v3d_render_job {
        struct v3d_job base;
 
-       /* Optional fence for the binner, to depend on before starting
-        * our job.
-        */
-       struct dma_fence *bin_done_fence;
-
        /* GPU virtual addresses of the start/end of the CL job. */
        u32 start, end;
 
index 6873b14a0d38af2024fb6c8a7508d69772090776..f736e021467ac4f6846f05bf3e387c4d64bf7965 100644 (file)
@@ -243,16 +243,25 @@ v3d_invalidate_caches(struct v3d_dev *v3d)
  * to v3d, so we don't attach dma-buf fences to them.
  */
 static int
-v3d_lock_bo_reservations(struct drm_gem_object **bos,
-                        int bo_count,
+v3d_lock_bo_reservations(struct v3d_job *job,
                         struct ww_acquire_ctx *acquire_ctx)
 {
        int i, ret;
 
-       ret = drm_gem_lock_reservations(bos, bo_count, acquire_ctx);
+       ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
        if (ret)
                return ret;
 
+       for (i = 0; i < job->bo_count; i++) {
+               ret = drm_gem_fence_array_add_implicit(&job->deps,
+                                                      job->bo[i], true);
+               if (ret) {
+                       drm_gem_unlock_reservations(job->bo, job->bo_count,
+                                                   acquire_ctx);
+                       return ret;
+               }
+       }
+
        return 0;
 }
 
@@ -339,6 +348,8 @@ static void
 v3d_job_free(struct kref *ref)
 {
        struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
+       unsigned long index;
+       struct dma_fence *fence;
        int i;
 
        for (i = 0; i < job->bo_count; i++) {
@@ -347,7 +358,11 @@ v3d_job_free(struct kref *ref)
        }
        kvfree(job->bo);
 
-       dma_fence_put(job->in_fence);
+       xa_for_each(&job->deps, index, fence) {
+               dma_fence_put(fence);
+       }
+       xa_destroy(&job->deps);
+
        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);
 
@@ -414,6 +429,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
             struct v3d_job *job, void (*free)(struct kref *ref),
             u32 in_sync)
 {
+       struct dma_fence *in_fence = NULL;
        int ret;
 
        job->v3d = v3d;
@@ -423,15 +439,23 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
        if (ret < 0)
                return ret;
 
-       ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &job->in_fence);
-       if (ret == -EINVAL) {
-               pm_runtime_put_autosuspend(v3d->dev);
-               return ret;
-       }
+       xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
+
+       ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
+       if (ret == -EINVAL)
+               goto fail;
+
+       ret = drm_gem_fence_array_add(&job->deps, in_fence);
+       if (ret)
+               goto fail;
 
        kref_init(&job->refcount);
 
        return 0;
+fail:
+       xa_destroy(&job->deps);
+       pm_runtime_put_autosuspend(v3d->dev);
+       return ret;
 }
 
 static int
@@ -552,8 +576,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto fail;
 
-       ret = v3d_lock_bo_reservations(render->base.bo, render->base.bo_count,
-                                      &acquire_ctx);
+       ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx);
        if (ret)
                goto fail;
 
@@ -563,7 +586,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
                if (ret)
                        goto fail_unreserve;
 
-               render->bin_done_fence = dma_fence_get(bin->base.done_fence);
+               ret = drm_gem_fence_array_add(&render->base.deps,
+                                             dma_fence_get(bin->base.done_fence));
+               if (ret)
+                       goto fail_unreserve;
        }
 
        ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
@@ -661,8 +687,7 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
        }
        spin_unlock(&file_priv->table_lock);
 
-       ret = v3d_lock_bo_reservations(job->base.bo, job->base.bo_count,
-                                      &acquire_ctx);
+       ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
        if (ret)
                goto fail;
 
@@ -751,9 +776,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto fail;
 
-       ret = v3d_lock_bo_reservations(clean_job->base.bo,
-                                      clean_job->base.bo_count,
-                                      &acquire_ctx);
+       ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
        if (ret)
                goto fail;
 
@@ -762,7 +785,11 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto fail_unreserve;
 
-       clean_job->in_fence = dma_fence_get(job->base.done_fence);
+       ret = drm_gem_fence_array_add(&clean_job->deps,
+                                     dma_fence_get(job->base.done_fence));
+       if (ret)
+               goto fail_unreserve;
+
        ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
        if (ret)
                goto fail_unreserve;
index ad2245701dda27916127a0999ccbf85bc4bef52c..b4255807b3a7730463e66f55c59b10c945a119ec 100644 (file)
@@ -74,47 +74,15 @@ v3d_job_dependency(struct drm_sched_job *sched_job,
                   struct drm_sched_entity *s_entity)
 {
        struct v3d_job *job = to_v3d_job(sched_job);
-       struct dma_fence *fence;
-
-       fence = job->in_fence;
-       if (fence) {
-               job->in_fence = NULL;
-               return fence;
-       }
 
        /* XXX: Wait on a fence for switching the GMP if necessary,
         * and then do so.
         */
 
-       return NULL;
-}
+       if (!xa_empty(&job->deps))
+               return xa_erase(&job->deps, job->last_dep++);
 
-/**
- * Returns the fences that the render job depends on, one by one.
- * v3d_job_run() won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_render_job_dependency(struct drm_sched_job *sched_job,
-                         struct drm_sched_entity *s_entity)
-{
-       struct v3d_render_job *job = to_render_job(sched_job);
-       struct dma_fence *fence;
-
-       fence = v3d_job_dependency(sched_job, s_entity);
-       if (fence)
-               return fence;
-
-       /* If we had a bin job, the render job definitely depends on
-        * it. We first have to wait for bin to be scheduled, so that
-        * its done_fence is created.
-        */
-       fence = job->bin_done_fence;
-       if (fence) {
-               job->bin_done_fence = NULL;
-               return fence;
-       }
-
-       return fence;
+       return NULL;
 }
 
 static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
@@ -394,7 +362,7 @@ static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
 };
 
 static const struct drm_sched_backend_ops v3d_render_sched_ops = {
-       .dependency = v3d_render_job_dependency,
+       .dependency = v3d_job_dependency,
        .run_job = v3d_render_job_run,
        .timedout_job = v3d_render_job_timedout,
        .free_job = v3d_job_free,