From 330bec4b7ccf4a556469cfec2ad695a180e35954 Mon Sep 17 00:00:00 2001
From: Rob Herring
Date: Mon, 26 Aug 2019 17:33:11 -0500
Subject: [PATCH] drm/panfrost: Hold runtime PM reference until jobs complete

Doing a pm_runtime_put as soon as a job is submitted is wrong as it
should not happen until the job completes. It works currently because
we are relying on the autosuspend timeout to keep the h/w enabled.

Fixes: f3ba91228e8e ("drm/panfrost: Add initial panfrost driver")
Cc: Tomeu Vizoso
Cc: Steven Price
Cc: Alyssa Rosenzweig
Cc: David Airlie
Cc: Daniel Vetter
Signed-off-by: Rob Herring
Acked-by: Alyssa Rosenzweig
Reviewed-by: Steven Price
Link: https://patchwork.freedesktop.org/patch/msgid/20190826223317.28509-3-robh@kernel.org
---
 drivers/gpu/drm/panfrost/panfrost_job.c | 39 ++++++++++++++++++-------
 1 file changed, 28 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 05c85f45a0de..18bcc9bac6d2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -150,8 +150,10 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 	if (ret < 0)
 		return;
 
-	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
-		goto end;
+	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
+		pm_runtime_put_sync_autosuspend(pfdev->dev);
+		return;
+	}
 
 	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
 
@@ -187,10 +189,6 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
 
 	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
-
-end:
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -369,6 +367,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	struct panfrost_job *job = to_panfrost_job(sched_job);
 	struct panfrost_device *pfdev = job->pfdev;
 	int js = panfrost_job_get_slot(job);
+	unsigned long flags;
 	int i;
 
 	/*
@@ -394,6 +393,15 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	if (sched_job)
 		drm_sched_increase_karma(sched_job);
 
+	spin_lock_irqsave(&pfdev->js->job_lock, flags);
+	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+		if (pfdev->jobs[i]) {
+			pm_runtime_put_noidle(pfdev->dev);
+			pfdev->jobs[i] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
+
 	/* panfrost_core_dump(pfdev); */
 
 	panfrost_devfreq_record_transition(pfdev, js);
@@ -450,12 +458,21 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 		}
 
 		if (status & JOB_INT_MASK_DONE(j)) {
-			struct panfrost_job *job = pfdev->jobs[j];
+			struct panfrost_job *job;
+
+			spin_lock(&pfdev->js->job_lock);
+			job = pfdev->jobs[j];
+			/* Only NULL if job timeout occurred */
+			if (job) {
+				pfdev->jobs[j] = NULL;
+
+				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+				panfrost_devfreq_record_transition(pfdev, j);
 
-			pfdev->jobs[j] = NULL;
-			panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
-			panfrost_devfreq_record_transition(pfdev, j);
-			dma_fence_signal(job->done_fence);
+				dma_fence_signal_locked(job->done_fence);
+				pm_runtime_put_autosuspend(pfdev->dev);
+			}
+			spin_unlock(&pfdev->js->job_lock);
 		}
 
 		status &= ~mask;
-- 
2.45.2
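
As background, the sketch below illustrates the runtime PM reference balance the patch
establishes: a reference is taken when a job is submitted and only dropped when the job
completes, fails to queue, or is discarded by the timeout handler. It is a simplified,
hypothetical example, not panfrost driver code; my_dev, my_submit, my_complete and the
pending_job field are made-up names, and only the pm_runtime_* calls are the real
runtime PM API.

/*
 * Simplified sketch (assumed names, not the driver) of holding a runtime
 * PM reference for the lifetime of a submitted job.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

struct my_dev {
	struct device *dev;
	void *pending_job;		/* hypothetical single job slot */
};

static int my_submit(struct my_dev *mdev, void *job)
{
	int ret;

	/* Take a runtime PM reference that lives as long as the job. */
	ret = pm_runtime_get_sync(mdev->dev);
	if (ret < 0)
		return ret;

	if (mdev->pending_job) {
		/* Submission failed: drop the reference we just took. */
		pm_runtime_put_sync_autosuspend(mdev->dev);
		return -EBUSY;
	}

	mdev->pending_job = job;	/* reference released on completion */
	return 0;
}

static void my_complete(struct my_dev *mdev)
{
	mdev->pending_job = NULL;
	/* Job finished: only now allow the device to autosuspend. */
	pm_runtime_put_autosuspend(mdev->dev);
}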