asedeno.scripts.mit.edu Git - linux.git/commitdiff
drm/etnaviv: move workqueue to be per GPU
author Lucas Stach <l.stach@pengutronix.de>
Fri, 17 Nov 2017 16:43:37 +0000 (17:43 +0100)
committer Lucas Stach <l.stach@pengutronix.de>
Tue, 2 Jan 2018 16:24:28 +0000 (17:24 +0100)
While the etnaviv workqueue needs to be ordered, as we rely on work items
being executed in queuing order, this is only true for a single GPU.
Having a shared workqueue for all GPUs in the system limits concurrency
artificially.

Getting each GPU its own ordered workqueue still meets our ordering
expectations and enables retire workers to run concurrently.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h

index 491eddf9b15022177bc3106181aef4cccaf89fa5..ca03b5e4789ba46c64b83bac3769cb2d1b6f0759 100644 (file)
@@ -580,12 +580,6 @@ static int etnaviv_bind(struct device *dev)
        }
        drm->dev_private = priv;
 
-       priv->wq = alloc_ordered_workqueue("etnaviv", 0);
-       if (!priv->wq) {
-               ret = -ENOMEM;
-               goto out_wq;
-       }
-
        mutex_init(&priv->gem_lock);
        INIT_LIST_HEAD(&priv->gem_list);
        priv->num_gpus = 0;
@@ -607,9 +601,6 @@ static int etnaviv_bind(struct device *dev)
 out_register:
        component_unbind_all(dev, drm);
 out_bind:
-       flush_workqueue(priv->wq);
-       destroy_workqueue(priv->wq);
-out_wq:
        kfree(priv);
 out_unref:
        drm_dev_unref(drm);
@@ -624,9 +615,6 @@ static void etnaviv_unbind(struct device *dev)
 
        drm_dev_unregister(drm);
 
-       flush_workqueue(priv->wq);
-       destroy_workqueue(priv->wq);
-
        component_unbind_all(dev, drm);
 
        drm->dev_private = NULL;
index d249acb6da0825e6e92427b0d3fe1e5f1f975a19..8668bfd4abd51be5c1c02933694090c6ae115b7d 100644 (file)
@@ -56,18 +56,8 @@ struct etnaviv_drm_private {
        /* list of GEM objects: */
        struct mutex gem_lock;
        struct list_head gem_list;
-
-       struct workqueue_struct *wq;
 };
 
-static inline void etnaviv_queue_work(struct drm_device *dev,
-       struct work_struct *w)
-{
-       struct etnaviv_drm_private *priv = dev->dev_private;
-
-       queue_work(priv->wq, w);
-}
-
 int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
                struct drm_file *file);
 
index 6ce4b9d236b4dc2f44e2fa46f3d6e727a5acff48..6176704bdae3277b88576309e35f85d156becc91 100644 (file)
@@ -958,7 +958,7 @@ static void recover_worker(struct work_struct *work)
        pm_runtime_put_autosuspend(gpu->dev);
 
        /* Retire the buffer objects in a work */
-       etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+       queue_work(gpu->wq, &gpu->retire_work);
 }
 
 static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
@@ -994,7 +994,7 @@ static void hangcheck_handler(struct timer_list *t)
                dev_err(gpu->dev, "     completed fence: %u\n", fence);
                dev_err(gpu->dev, "     active fence: %u\n",
                        gpu->active_fence);
-               etnaviv_queue_work(gpu->drm, &gpu->recover_work);
+               queue_work(gpu->wq, &gpu->recover_work);
        }
 
        /* if still more pending work, reset the hangcheck timer: */
@@ -1526,7 +1526,7 @@ static irqreturn_t irq_handler(int irq, void *data)
 
                        if (gpu->event[event].sync_point) {
                                gpu->sync_point_event = event;
-                               etnaviv_queue_work(gpu->drm, &gpu->sync_point_work);
+                               queue_work(gpu->wq, &gpu->sync_point_work);
                        }
 
                        fence = gpu->event[event].fence;
@@ -1552,7 +1552,7 @@ static irqreturn_t irq_handler(int irq, void *data)
                }
 
                /* Retire the buffer objects in a work */
-               etnaviv_queue_work(gpu->drm, &gpu->retire_work);
+               queue_work(gpu->wq, &gpu->retire_work);
 
                ret = IRQ_HANDLED;
        }
@@ -1721,12 +1721,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
                        return PTR_ERR(gpu->cooling);
        }
 
+       gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
+       if (!gpu->wq) {
+               if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+                       thermal_cooling_device_unregister(gpu->cooling);
+               return -ENOMEM;
+       }
+
 #ifdef CONFIG_PM
        ret = pm_runtime_get_sync(gpu->dev);
 #else
        ret = etnaviv_gpu_clk_enable(gpu);
 #endif
        if (ret < 0) {
+               destroy_workqueue(gpu->wq);
                if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
                        thermal_cooling_device_unregister(gpu->cooling);
                return ret;
@@ -1761,6 +1769,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 
        hangcheck_disable(gpu);
 
+       flush_workqueue(gpu->wq);
+       destroy_workqueue(gpu->wq);
+
 #ifdef CONFIG_PM
        pm_runtime_get_sync(gpu->dev);
        pm_runtime_put_sync_suspend(gpu->dev);
index 15090bb68f5a16b4c8dbccadcf9a90f831a3f13d..ccef6139cf7071fcea21d3c39e1986e0d42ed6d2 100644 (file)
@@ -106,6 +106,7 @@ struct etnaviv_gpu {
        struct mutex lock;
        struct etnaviv_chip_identity identity;
        struct etnaviv_file_private *lastctx;
+       struct workqueue_struct *wq;
 
        /* 'ring'-buffer: */
        struct etnaviv_cmdbuf *buffer;