asedeno.scripts.mit.edu Git - linux.git/commitdiff
crypto: inside-secure - retry to proceed the request later on fail
author Antoine Ténart <antoine.tenart@free-electrons.com>
Thu, 14 Dec 2017 14:26:57 +0000 (15:26 +0100)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 22 Dec 2017 09:03:35 +0000 (20:03 +1100)
The dequeueing function was putting back a request in the crypto queue
on failure (when not enough resources are available), which is not
ideal as the request will be handled much later. This patch updates
this logic by keeping a reference on the failed request so it can be
processed later, when enough resources are available.

Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/inside-secure/safexcel.c
drivers/crypto/inside-secure/safexcel.h

index 8042922b4ed8530cbd935341e57cb5c899290260..4c7f205d83f04bc8b1c5050f2f0459355e994320 100644 (file)
@@ -446,29 +446,36 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
        struct safexcel_request *request;
        int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
 
+       /* If a request wasn't properly dequeued because of a lack of resources,
+        * proceeded it first,
+        */
+       req = priv->ring[ring].req;
+       backlog = priv->ring[ring].backlog;
+       if (req)
+               goto handle_req;
+
        while (true) {
                spin_lock_bh(&priv->ring[ring].queue_lock);
                backlog = crypto_get_backlog(&priv->ring[ring].queue);
                req = crypto_dequeue_request(&priv->ring[ring].queue);
                spin_unlock_bh(&priv->ring[ring].queue_lock);
 
-               if (!req)
+               if (!req) {
+                       priv->ring[ring].req = NULL;
+                       priv->ring[ring].backlog = NULL;
                        goto finalize;
+               }
 
+handle_req:
                request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-               if (!request) {
-                       spin_lock_bh(&priv->ring[ring].queue_lock);
-                       crypto_enqueue_request(&priv->ring[ring].queue, req);
-                       spin_unlock_bh(&priv->ring[ring].queue_lock);
-                       goto finalize;
-               }
+               if (!request)
+                       goto request_failed;
 
                ctx = crypto_tfm_ctx(req->tfm);
                ret = ctx->send(req, ring, request, &commands, &results);
                if (ret) {
                        kfree(request);
-                       req->complete(req, ret);
-                       goto finalize;
+                       goto request_failed;
                }
 
                if (backlog)
@@ -483,6 +490,13 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
                nreq++;
        }
 
+request_failed:
+       /* Not enough resources to handle all the requests. Bail out and save
+        * the request and the backlog for the next dequeue call (per-ring).
+        */
+       priv->ring[ring].req = req;
+       priv->ring[ring].backlog = backlog;
+
 finalize:
        if (!nreq)
                return;
index 0c47e792192d0b8f7769099c9ab09694141fb30b..d4955abf873b0d0ae3efc88c7e7fe1f65c7d8a38 100644 (file)
@@ -499,6 +499,12 @@ struct safexcel_crypto_priv {
 
                /* The ring is currently handling at least one request */
                bool busy;
+
+               /* Store for current requests when bailing out of the dequeueing
+                * function when no enough resources are available.
+                */
+               struct crypto_async_request *req;
+               struct crypto_async_request *backlog;
        } ring[EIP197_MAX_RINGS];
 };