asedeno.scripts.mit.edu Git - linux.git/commitdiff
dmaengine: idxd: add descriptor manipulation routines
Author: Dave Jiang <dave.jiang@intel.com>
Tue, 21 Jan 2020 23:44:17 +0000 (16:44 -0700)
Committer: Vinod Koul <vkoul@kernel.org>
Fri, 24 Jan 2020 05:48:45 +0000 (11:18 +0530)
This commit adds helper functions for DSA descriptor allocation,
submission, and free operations.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/157965025757.73301.12692876585357550065.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/idxd/Makefile
drivers/dma/idxd/idxd.h
drivers/dma/idxd/submit.c [new file with mode: 0644]

index a552560a03dc3911b5061a85eec91f9e8fa10b4c..50eca12015e2a4fecd1a9bd48ccb818bcb8b8fe6 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_INTEL_IDXD) += idxd.o
-idxd-y := init.o irq.o device.o sysfs.o
+idxd-y := init.o irq.o device.o sysfs.o submit.o
index 909926aefd3e037a4719c1ee1c3fd4df111ce90d..d369b75468e3b7f624541e8944b0b4116cccec4d 100644 (file)
@@ -68,6 +68,11 @@ enum idxd_wq_type {
 #define WQ_NAME_SIZE   1024
 #define WQ_TYPE_SIZE   10
 
+/*
+ * How descriptor allocation behaves when the workqueue is exhausted:
+ * block until a descriptor frees up, or fail immediately.
+ */
+enum idxd_op_type {
+       IDXD_OP_BLOCK = 0,
+       IDXD_OP_NONBLOCK = 1,
+};
+
 struct idxd_wq {
        void __iomem *dportal;
        struct device conf_dev;
@@ -246,4 +251,9 @@ int idxd_wq_disable(struct idxd_wq *wq);
 int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 
+/* submission (submit.c): allocate, submit, and free DSA descriptors */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+
 #endif
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
new file mode 100644 (file)
index 0000000..a405f06
--- /dev/null
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <uapi/linux/idxd.h>
+#include "idxd.h"
+#include "registers.h"
+
+/*
+ * idxd_alloc_desc - take a free descriptor from the workqueue's
+ * pre-allocated pool, zeroing its hardware descriptor and completion
+ * record before returning it.
+ *
+ * @wq:     workqueue to allocate from
+ * @optype: IDXD_OP_BLOCK sleeps until a descriptor becomes available;
+ *          IDXD_OP_NONBLOCK fails fast instead of sleeping.
+ *
+ * Return: descriptor on success, or ERR_PTR(): -EIO if the device is
+ * not enabled (at entry or after a wakeup), -EBUSY/-EAGAIN when nothing
+ * is free in nonblocking mode, -EINTR if a blocking wait was
+ * interrupted by a signal.
+ */
+struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
+{
+       struct idxd_desc *desc;
+       int idx;
+       struct idxd_device *idxd = wq->idxd;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return ERR_PTR(-EIO);
+
+       /* Nonblocking callers must not sleep on the submit lock either. */
+       if (optype == IDXD_OP_BLOCK)
+               percpu_down_read(&wq->submit_lock);
+       else if (!percpu_down_read_trylock(&wq->submit_lock))
+               return ERR_PTR(-EBUSY);
+
+       /* Reserve a slot: dq_count counts outstanding descriptors and is
+        * capped at wq->size.
+        */
+       if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
+               int rc;
+
+               if (optype == IDXD_OP_NONBLOCK) {
+                       percpu_up_read(&wq->submit_lock);
+                       return ERR_PTR(-EAGAIN);
+               }
+
+               /* Queue is full: drop the read side and take the write
+                * side around the wait. NOTE(review): presumably this is
+                * meant to hold off new submitters while a waiter sleeps
+                * for a free slot — confirm against the lock's other
+                * users before relying on that.
+                */
+               percpu_up_read(&wq->submit_lock);
+               percpu_down_write(&wq->submit_lock);
+               rc = wait_event_interruptible(wq->submit_waitq,
+                                             atomic_add_unless(&wq->dq_count,
+                                                               1, wq->size) ||
+                                              idxd->state != IDXD_DEV_ENABLED);
+               percpu_up_write(&wq->submit_lock);
+               if (rc < 0)
+                       return ERR_PTR(-EINTR);
+               /* Woken because the device went down, not for a slot. */
+               if (idxd->state != IDXD_DEV_ENABLED)
+                       return ERR_PTR(-EIO);
+       } else {
+               percpu_up_read(&wq->submit_lock);
+       }
+
+       /* Claim a free index into the wq's descriptor array; on failure
+        * give back the dq_count reservation taken above.
+        */
+       idx = sbitmap_get(&wq->sbmap, 0, false);
+       if (idx < 0) {
+               atomic_dec(&wq->dq_count);
+               return ERR_PTR(-EAGAIN);
+       }
+
+       desc = wq->descs[idx];
+       /* Hand back a clean hardware descriptor and completion record. */
+       memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
+       memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+       return desc;
+}
+
+/*
+ * idxd_free_desc - return a descriptor to the workqueue's pool
+ *
+ * Releases the dq_count reservation taken in idxd_alloc_desc(), marks
+ * the descriptor's index free in the sbitmap, and wakes any blocking
+ * allocator sleeping on submit_waitq.
+ */
+void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       atomic_dec(&wq->dq_count);
+
+       sbitmap_clear_bit(&wq->sbmap, desc->id);
+       wake_up(&wq->submit_waitq);
+}
+
+/*
+ * idxd_submit_desc - issue a prepared descriptor to the device portal
+ *
+ * The caller must have filled in desc->hw, including int_handle, which
+ * selects the irq_entry whose pending list will track this descriptor
+ * until completion.
+ *
+ * Return: 0 on success, -EIO if the device is not enabled.
+ */
+int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
+{
+       struct idxd_device *idxd = wq->idxd;
+       int vec = desc->hw->int_handle;
+
+       if (idxd->state != IDXD_DEV_ENABLED)
+               return -EIO;
+
+       /*
+        * The wmb() flushes writes to coherent DMA data before possibly
+        * triggering a DMA read. The wmb() is necessary even on UP because
+        * the recipient is a device.
+        */
+       wmb();
+       iosubmit_cmds512(wq->dportal, desc->hw, 1);
+
+       /*
+        * Pending the descriptor to the lockless list for the irq_entry
+        * that we designated the descriptor to.
+        */
+       llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
+
+       return 0;
+}