octeontx2-af: Add option to disable dynamic entry caching in NDC
author    Sunil Goutham <sgoutham@marvell.com>
          Thu, 14 Nov 2019 05:26:32 +0000 (10:56 +0530)
committer David S. Miller <davem@davemloft.net>
          Fri, 15 Nov 2019 02:09:16 +0000 (18:09 -0800)
A config option is added to disable caching of dynamic entries
like SQEs and stack pages. When enabled, it also locks down all HW
contexts in NDC, preventing them from being evicted.

This option is useful when the queue count is large and NDC cache
misses are high. It trades caching of dynamically changing entries,
such as SQEs and stack page pointers, for fewer SQ context misses.

Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/marvell/octeontx2/Kconfig
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c

diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 711ada7139d30a11d07e69ccc1c8ef10d751a5f0..fb34fbd62088f92f2474c0c15950237943a078fd 100644
@@ -16,3 +16,12 @@ config OCTEONTX2_AF
          Unit's admin function manager which manages all RVU HW resources
          and provides a medium to other PF/VFs to configure HW. Should be
          enabled for other RVU device drivers to work.
+
+config NDC_DIS_DYNAMIC_CACHING
+       bool "Disable caching of dynamic entries in NDC"
+       depends on OCTEONTX2_AF
+       default n
+       ---help---
+         This config option disables caching of dynamic entries such as NIX
+         SQEs, NPA stack pages etc. in NDC. It also locks down NIX SQ/CQ/RQ/RSS
+         and NPA Aura/Pool contexts.
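
The new symbol defaults to off and is only selectable when the AF driver is enabled. As a minimal sketch (not part of the patch), a .config fragment that turns it on could look like the following; the AF driver can equally be built as a module (CONFIG_OCTEONTX2_AF=m):

        CONFIG_OCTEONTX2_AF=y
        CONFIG_NDC_DIS_DYNAMIC_CACHING=y
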
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 86042a74f6ddcd1d5ca14c83224da9213ce13cbf..63190b89f7094e5f87459a96e0f26442de8f90a9 100644
@@ -661,6 +661,21 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
        return 0;
 }
 
+static const char *nix_get_ctx_name(int ctype)
+{
+       switch (ctype) {
+       case NIX_AQ_CTYPE_CQ:
+               return "CQ";
+       case NIX_AQ_CTYPE_SQ:
+               return "SQ";
+       case NIX_AQ_CTYPE_RQ:
+               return "RQ";
+       case NIX_AQ_CTYPE_RSS:
+               return "RSS";
+       }
+       return "";
+}
+
 static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
 {
        struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -705,21 +720,60 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
                if (rc) {
                        err = rc;
                        dev_err(rvu->dev, "Failed to disable %s:%d context\n",
-                               (req->ctype == NIX_AQ_CTYPE_CQ) ?
-                               "CQ" : ((req->ctype == NIX_AQ_CTYPE_RQ) ?
-                               "RQ" : "SQ"), qidx);
+                               nix_get_ctx_name(req->ctype), qidx);
                }
        }
 
        return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int nix_lf_hwctx_lockdown(struct rvu *rvu, struct nix_aq_enq_req *req)
+{
+       struct nix_aq_enq_req lock_ctx_req;
+       int err;
+
+       if (req->op != NIX_AQ_INSTOP_INIT)
+               return 0;
+
+       if (req->ctype == NIX_AQ_CTYPE_MCE ||
+           req->ctype == NIX_AQ_CTYPE_DYNO)
+               return 0;
+
+       memset(&lock_ctx_req, 0, sizeof(struct nix_aq_enq_req));
+       lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+       lock_ctx_req.ctype = req->ctype;
+       lock_ctx_req.op = NIX_AQ_INSTOP_LOCK;
+       lock_ctx_req.qidx = req->qidx;
+       err = rvu_nix_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+       if (err)
+               dev_err(rvu->dev,
+                       "PFUNC 0x%x: Failed to lock NIX %s:%d context\n",
+                       req->hdr.pcifunc,
+                       nix_get_ctx_name(req->ctype), req->qidx);
+       return err;
+}
+
+int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
+                               struct nix_aq_enq_req *req,
+                               struct nix_aq_enq_rsp *rsp)
+{
+       int err;
+
+       err = rvu_nix_aq_enq_inst(rvu, req, rsp);
+       if (!err)
+               err = nix_lf_hwctx_lockdown(rvu, req);
+       return err;
+}
+#else
+
 int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
                                struct nix_aq_enq_req *req,
                                struct nix_aq_enq_rsp *rsp)
 {
        return rvu_nix_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
@@ -2871,6 +2925,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
        /* Do not bypass NDC cache */
        cfg = rvu_read64(rvu, block->addr, NIX_AF_NDC_CFG);
        cfg &= ~0x3FFEULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+       /* Disable caching of SQB aka SQEs */
+       cfg |= 0x04ULL;
+#endif
        rvu_write64(rvu, block->addr, NIX_AF_NDC_CFG, cfg);
 
        /* Result structure can be followed by RQ/SQ/CQ context at
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c
index a8f9376f6a0b4ac7e5de65abedcd340f19cfd96b..6e7c7f459f745b76422f06bfe6bf84d780f55585 100644
@@ -241,12 +241,50 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req)
        return err;
 }
 
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+static int npa_lf_hwctx_lockdown(struct rvu *rvu, struct npa_aq_enq_req *req)
+{
+       struct npa_aq_enq_req lock_ctx_req;
+       int err;
+
+       if (req->op != NPA_AQ_INSTOP_INIT)
+               return 0;
+
+       memset(&lock_ctx_req, 0, sizeof(struct npa_aq_enq_req));
+       lock_ctx_req.hdr.pcifunc = req->hdr.pcifunc;
+       lock_ctx_req.ctype = req->ctype;
+       lock_ctx_req.op = NPA_AQ_INSTOP_LOCK;
+       lock_ctx_req.aura_id = req->aura_id;
+       err = rvu_npa_aq_enq_inst(rvu, &lock_ctx_req, NULL);
+       if (err)
+               dev_err(rvu->dev,
+                       "PFUNC 0x%x: Failed to lock NPA context %s:%d\n",
+                       req->hdr.pcifunc,
+                       (req->ctype == NPA_AQ_CTYPE_AURA) ?
+                       "Aura" : "Pool", req->aura_id);
+       return err;
+}
+
+int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
+                               struct npa_aq_enq_req *req,
+                               struct npa_aq_enq_rsp *rsp)
+{
+       int err;
+
+       err = rvu_npa_aq_enq_inst(rvu, req, rsp);
+       if (!err)
+               err = npa_lf_hwctx_lockdown(rvu, req);
+       return err;
+}
+#else
+
 int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu,
                                struct npa_aq_enq_req *req,
                                struct npa_aq_enq_rsp *rsp)
 {
        return rvu_npa_aq_enq_inst(rvu, req, rsp);
 }
+#endif
 
 int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu,
                                       struct hwctx_disable_req *req,
@@ -427,6 +465,10 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block)
        /* Do not bypass NDC cache */
        cfg = rvu_read64(rvu, block->addr, NPA_AF_NDC_CFG);
        cfg &= ~0x03DULL;
+#ifdef CONFIG_NDC_DIS_DYNAMIC_CACHING
+       /* Disable caching of stack pages */
+       cfg |= 0x10ULL;
+#endif
        rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg);
 
        /* Result structure can be followed by Aura/Pool context at