crypto: inside-secure - move request dequeueing into a workqueue
author    Antoine Ténart <antoine.tenart@free-electrons.com>
          Thu, 14 Dec 2017 14:26:51 +0000 (15:26 +0100)
committer Herbert Xu <herbert@gondor.apana.org.au>
          Fri, 22 Dec 2017 09:03:32 +0000 (20:03 +1100)
This patch moves request dequeueing into a workqueue to improve the
coalescing of interrupts when sending requests to the engine, as the
engine is capable of raising one single interrupt for n requests sent.
Using a workqueue allows more requests to be sent at once.

Suggested-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
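
For reference, the deferral pattern this commit adopts looks roughly like
the sketch below. It is illustrative only: the demo_* names are
hypothetical, and only the INIT_WORK()/queue_work()/container_of() usage
mirrors what the driver does.

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_ring {
	struct workqueue_struct *wq;
	struct work_struct dequeue_work;
};

/* Runs in process context; drains pending requests as one batch. */
static void demo_dequeue_work(struct work_struct *work)
{
	struct demo_ring *ring =
			container_of(work, struct demo_ring, dequeue_work);

	/* Batch-submit queued requests to the engine here. */
	(void)ring;
}

/* The interrupt handler only schedules the work item. */
static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_ring *ring = data;

	queue_work(ring->wq, &ring->dequeue_work);
	return IRQ_HANDLED;
}

static int demo_ring_init(struct demo_ring *ring)
{
	ring->wq = create_singlethread_workqueue("demo_ring");
	if (!ring->wq)
		return -ENOMEM;

	INIT_WORK(&ring->dequeue_work, demo_dequeue_work);
	return 0;
}

Because each ring uses a single-threaded workqueue, its result-handling
and dequeue work items never run concurrently with each other, which keeps
per-ring processing serialized.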
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel.h
drivers/crypto/inside-secure/safexcel_cipher.c
drivers/crypto/inside-secure/safexcel_hash.c

diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index f250c3c1ab0f1e20de05986181a88395817abfd3..f422af3eed2f1deff60aba1b03ca6e19097b9b47 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -429,8 +429,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
        struct safexcel_request *request;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
-       priv->ring[ring].need_dequeue = false;
-
        do {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
@@ -445,8 +443,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                        spin_lock_bh(&priv->ring[ring].queue_lock);
                        crypto_enqueue_request(&priv->ring[ring].queue, req);
                        spin_unlock_bh(&priv->ring[ring].queue_lock);
-
-                       priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }
 
@@ -455,7 +451,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                if (ret) {
                        kfree(request);
                        req->complete(req, ret);
-                       priv->ring[ring].need_dequeue = true;
                        goto finalize;
                }
 
@@ -471,9 +466,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
        } while (nreq++ < EIP197_MAX_BATCH_SZ);
 
 finalize:
-       if (nreq == EIP197_MAX_BATCH_SZ)
-               priv->ring[ring].need_dequeue = true;
-       else if (!nreq)
+       if (!nreq)
                return;
 
        spin_lock_bh(&priv->ring[ring].lock);
@@ -628,13 +621,18 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv
 static void safexcel_handle_result_work(struct work_struct *work)
 {
        struct safexcel_work_data *data =
-                       container_of(work, struct safexcel_work_data, work);
+                       container_of(work, struct safexcel_work_data, result_work);
        struct safexcel_crypto_priv *priv = data->priv;
 
        safexcel_handle_result_descriptor(priv, data->ring);
+}
+
+static void safexcel_dequeue_work(struct work_struct *work)
+{
+       struct safexcel_work_data *data =
+                       container_of(work, struct safexcel_work_data, work);
 
-       if (priv->ring[data->ring].need_dequeue)
-               safexcel_dequeue(data->priv, data->ring);
+       safexcel_dequeue(data->priv, data->ring);
 }
 
 struct safexcel_ring_irq_data {
@@ -665,7 +663,10 @@ static irqreturn_t safexcel_irq_ring(int irq, void *data)
                         */
                        dev_err(priv->dev, "RDR: fatal error.");
                } else if (likely(stat & EIP197_xDR_THRESH)) {
-                       queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+                       queue_work(priv->ring[ring].workqueue,
+                                  &priv->ring[ring].work_data.result_work);
+                       queue_work(priv->ring[ring].workqueue,
+                                  &priv->ring[ring].work_data.work);
                }
 
                /* ACK the interrupts */
@@ -846,7 +847,9 @@ static int safexcel_probe(struct platform_device *pdev)
 
                priv->ring[i].work_data.priv = priv;
                priv->ring[i].work_data.ring = i;
-               INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+               INIT_WORK(&priv->ring[i].work_data.result_work,
+                         safexcel_handle_result_work);
+               INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);
 
                snprintf(wq_name, 9, "wq_ring%d", i);
                priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index d12c2b479a5e0454c59ff20447ff96477a231a6e..8e9c65183439b46b5a80bc5796b060a6c3e22e2a 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -459,6 +459,7 @@ struct safexcel_config {
 
 struct safexcel_work_data {
        struct work_struct work;
+       struct work_struct result_work;
        struct safexcel_crypto_priv *priv;
        int ring;
 };
@@ -489,7 +490,6 @@ struct safexcel_crypto_priv {
                /* queue */
                struct crypto_queue queue;
                spinlock_t queue_lock;
-               bool need_dequeue;
        } ring[EIP197_MAX_RINGS];
 };
 
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index fe1d588d6a256bf665c7098c7566bd6bcab8f8f7..0e5cc230e49ab9aa12dba0fa109ea3e7a7c33557 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -358,8 +358,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        *should_complete = false;
 
@@ -448,8 +448,8 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        wait_for_completion_interruptible(&result.completion);
 
@@ -495,8 +495,8 @@ static int safexcel_aes(struct skcipher_request *req,
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        return ret;
 }
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index d233f4a0971239dfe8128510c395236209d98a09..2fb5bc6b6268f37dff28a49a3edb61968c25309c 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -381,8 +381,8 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        *should_complete = false;
 
@@ -470,8 +470,8 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        wait_for_completion_interruptible(&result.completion);
 
@@ -556,8 +556,8 @@ static int safexcel_ahash_enqueue(struct ahash_request *areq)
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-       if (!priv->ring[ring].need_dequeue)
-               safexcel_dequeue(priv, ring);
+       queue_work(priv->ring[ring].workqueue,
+                  &priv->ring[ring].work_data.work);
 
        return ret;
 }