Merge branch 'fixes' into next
authorVinod Koul <vkoul@kernel.org>
Thu, 14 Nov 2019 10:32:51 +0000 (16:02 +0530)
committerVinod Koul <vkoul@kernel.org>
Thu, 14 Nov 2019 10:32:51 +0000 (16:02 +0530)
40 files changed:
Documentation/devicetree/bindings/dma/dma-common.yaml
Documentation/devicetree/bindings/dma/jz4780-dma.txt
Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/dma/ti-edma.txt
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
MAINTAINERS
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/at_xdmac.c
drivers/dma/dma-jz4780.c
drivers/dma/dw/platform.c
drivers/dma/fsl-dpaa2-qdma/Kconfig [new file with mode: 0644]
drivers/dma/fsl-dpaa2-qdma/Makefile [new file with mode: 0644]
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c [new file with mode: 0644]
drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h [new file with mode: 0644]
drivers/dma/fsl-dpaa2-qdma/dpdmai.c [new file with mode: 0644]
drivers/dma/fsl-dpaa2-qdma/dpdmai.h [new file with mode: 0644]
drivers/dma/fsl-qdma.c
drivers/dma/iop-adma.c
drivers/dma/k3dma.c
drivers/dma/mediatek/mtk-cqdma.c
drivers/dma/mediatek/mtk-hsdma.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/milbeaut-hdmac.c [new file with mode: 0644]
drivers/dma/milbeaut-xdmac.c [new file with mode: 0644]
drivers/dma/owl-dma.c
drivers/dma/sf-pdma/Kconfig [new file with mode: 0644]
drivers/dma/sf-pdma/Makefile [new file with mode: 0644]
drivers/dma/sf-pdma/sf-pdma.c [new file with mode: 0644]
drivers/dma/sf-pdma/sf-pdma.h [new file with mode: 0644]
drivers/dma/sh/rcar-dmac.c
drivers/dma/sprd-dma.c
drivers/dma/ti/edma.c
drivers/dma/uniphier-mdmac.c
drivers/dma/xilinx/xilinx_dma.c
drivers/dma/zx_dma.c
include/dt-bindings/dma/x1000-dma.h [new file with mode: 0644]

index ed0a49a6f02080dbd16423f3cc278594a4f2a8b8..02a34ba2b49bbc8ceceb3e56940169901a44e40c 100644 (file)
@@ -25,11 +25,18 @@ properties:
       Used to provide DMA controller specific information.
 
   dma-channel-mask:
-    $ref: /schemas/types.yaml#definitions/uint32
     description:
       Bitmask of available DMA channels in ascending order that are
       not reserved by firmware and are available to the
       kernel. i.e. first channel corresponds to LSB.
+      The first item in the array is for channels 0-31, the second is for
+      channels 32-63, etc.
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/uint32-array
+    items:
+      minItems: 1
+      # Should be enough
+      maxItems: 255
 
   dma-channels:
     $ref: /schemas/types.yaml#definitions/uint32
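
As a rough illustration (not part of the patch), the channel mapping described by the new dma-channel-mask array above can be expressed with a small helper; dma_channel_is_usable() below is a hypothetical name used only for this sketch. Bit 0 of the first cell is channel 0, the second cell covers channels 32-63, and so on.

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical helper, for illustration only. */
static bool dma_channel_is_usable(const u32 *mask, unsigned int nr_cells,
                                  unsigned int chan)
{
        if (chan / 32 >= nr_cells)
                return false;

        return mask[chan / 32] & BIT(chan % 32);
}
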
index 636fcb26b164ea78ef14c1de8031cf44c892d0c8..ec89782d949884829b76e85f0979f1d0d5b9f41a 100644 (file)
@@ -7,10 +7,11 @@ Required properties:
   * ingenic,jz4725b-dma
   * ingenic,jz4770-dma
   * ingenic,jz4780-dma
+  * ingenic,x1000-dma
 - reg: Should contain the DMA channel registers location and length, followed
   by the DMA controller registers location and length.
 - interrupts: Should contain the interrupt specifier of the DMA controller.
-- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
+- clocks: Should contain a clock specifier for the JZ4780/X1000 PDMA clock.
 - #dma-cells: Must be <2>. Number of integer cells in the dmas property of
   DMA clients (see below).
 
diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-hdmac.txt
new file mode 100644 (file)
index 0000000..1f0875b
--- /dev/null
@@ -0,0 +1,32 @@
+* Milbeaut AHB DMA Controller
+
+The Milbeaut AHB DMA controller has the following transfer capabilities:
+ - device to memory transfer
+ - memory to device transfer
+
+Required property:
+- compatible:       Should be  "socionext,milbeaut-m10v-hdmac"
+- reg:              Should contain DMA registers location and length.
+- interrupts:       Should contain all of the per-channel DMA interrupts.
+                     The number of channels is configurable (2, 4 or 8), so
+                     the number of interrupts specified should match.
+- #dma-cells:       Should be 1. Specify the ID of the slave.
+- clocks:           Phandle to the clock used by the HDMAC module.
+
+
+Example:
+
+       hdmac1: dma-controller@1e110000 {
+               compatible = "socionext,milbeaut-m10v-hdmac";
+               reg = <0x1e110000 0x10000>;
+               interrupts = <0 132 4>,
+                            <0 133 4>,
+                            <0 134 4>,
+                            <0 135 4>,
+                            <0 136 4>,
+                            <0 137 4>,
+                            <0 138 4>,
+                            <0 139 4>;
+               #dma-cells = <1>;
+               clocks = <&dummy_clk>;
+       };
diff --git a/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt b/Documentation/devicetree/bindings/dma/milbeaut-m10v-xdmac.txt
new file mode 100644 (file)
index 0000000..3057918
--- /dev/null
@@ -0,0 +1,24 @@
+* Milbeaut AXI DMA Controller
+
+The Milbeaut AXI DMA controller supports only memory-to-memory transfers.
+
+* DMA controller
+
+Required property:
+- compatible:  Should be  "socionext,milbeaut-m10v-xdmac"
+- reg:         Should contain DMA registers location and length.
+- interrupts:  Should contain all of the per-channel DMA interrupts.
+                The number of channels is configurable (2, 4 or 8), so
+                the number of interrupts specified should match.
+- #dma-cells:  Should be 1.
+
+Example:
+       xdmac0: dma-controller@1c250000 {
+               compatible = "socionext,milbeaut-m10v-xdmac";
+               reg = <0x1c250000 0x1000>;
+               interrupts = <0 17 0x4>,
+                            <0 18 0x4>,
+                            <0 19 0x4>,
+                            <0 20 0x4>;
+               #dma-cells = <1>;
+       };
index 5a512c5ea76a1cf88a9a5a2a5d310a28074330bc..5551e929fd99f6305333362df63034ac8e5aa48e 100644 (file)
@@ -21,6 +21,7 @@ Required Properties:
                - "renesas,dmac-r8a7745" (RZ/G1E)
                - "renesas,dmac-r8a77470" (RZ/G1C)
                - "renesas,dmac-r8a774a1" (RZ/G2M)
+               - "renesas,dmac-r8a774b1" (RZ/G2N)
                - "renesas,dmac-r8a774c0" (RZ/G2E)
                - "renesas,dmac-r8a7790" (R-Car H2)
                - "renesas,dmac-r8a7791" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
new file mode 100644 (file)
index 0000000..2ca3ddb
--- /dev/null
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/sifive,fu540-c000-pdma.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: SiFive Unleashed Rev C000 Platform DMA
+
+maintainers:
+  - Green Wan <green.wan@sifive.com>
+  - Palmer Dabbelt <palmer@sifive.com>
+  - Paul Walmsley <paul.walmsley@sifive.com>
+
+description: |
+  Platform DMA is the DMA engine of the SiFive Unleashed. It supports 4
+  channels. Each channel has 2 interrupts: one for DMA done and the
+  other for DMA error.
+
+  On a different SoC the DMA engine may be attached to different IRQ
+  lines, so the DT needs to be adjusted accordingly. For the technical
+  documentation, see:
+
+  https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+
+properties:
+  compatible:
+    items:
+      - const: sifive,fu540-c000-pdma
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    minItems: 1
+    maxItems: 8
+
+  '#dma-cells':
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - '#dma-cells'
+
+examples:
+  - |
+    dma@3000000 {
+      compatible = "sifive,fu540-c000-pdma";
+      reg = <0x0 0x3000000 0x0 0x8000>;
+      interrupts = <23 24 25 26 27 28 29 30>;
+      #dma-cells = <1>;
+    };
+
+...
index 4bbc94d829c8aa8c54f13de98e7a526dc72c0eb6..0e1398f93aa235c658854a08cf330ce1c8989d9c 100644 (file)
@@ -42,6 +42,11 @@ Optional properties:
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
                the driver, they are allocated to be used by for example the
                DSP. See example.
+- dma-channel-mask: Mask of usable channels.
+               Single uint32 for EDMA with 32 channels, array of two uint32 for
+               EDMA with 64 channels. See example and
+               Documentation/devicetree/bindings/dma/dma-common.yaml
+
 
 ------------------------------------------------------------------------------
 eDMA3 Transfer Controller
@@ -91,6 +96,9 @@ edma: edma@49000000 {
        ti,edma-memcpy-channels = <20 21>;
        /* The following PaRAM slots are reserved: 35-44 and 100-109 */
        ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
+       /* The following channels are reserved: 35-44 */
+       dma-channel-mask = <0xffffffff /* Channel 0-31 */
+                           0xffffe007>; /* Channel 32-63 */
 };
 
 edma_tptc0: tptc@49800000 {
index 93b6d961dd4fa81b155813c2cac3e4f46696b333..325aca52cd438b8508ac4608050759e5e7c86dc0 100644 (file)
@@ -11,9 +11,16 @@ is to receive from the device.
 Xilinx AXI CDMA engine, it does transfers between memory-mapped source
 address and a memory-mapped destination address.
 
+Xilinx AXI MCDMA engine, it does transfers between memory and AXI4-Stream
+target devices. It can be configured to have up to 16 independent transmit
+and receive channels.
+
 Required properties:
-- compatible: Should be "xlnx,axi-vdma-1.00.a" or "xlnx,axi-dma-1.00.a" or
-             "xlnx,axi-cdma-1.00.a""
+- compatible: Should be one of:
+               "xlnx,axi-vdma-1.00.a"
+               "xlnx,axi-dma-1.00.a"
+               "xlnx,axi-cdma-1.00.a"
+               "xlnx,axi-mcdma-1.00.a"
 - #dma-cells: Should be <1>, see "dmas" property below
 - reg: Should contain VDMA registers location and length.
 - xlnx,addrwidth: Should be the vdma addressing size in bits(ex: 32 bits).
@@ -29,7 +36,7 @@ Required properties:
                           "m_axis_mm2s_aclk", "s_axis_s2mm_aclk"
        For CDMA:
        Required elements: "s_axi_lite_aclk", "m_axi_aclk"
-       FOR AXIDMA:
+       For AXIDMA and MCDMA:
        Required elements: "s_axi_lite_aclk"
        Optional elements: "m_axi_mm2s_aclk", "m_axi_s2mm_aclk",
                           "m_axi_sg_aclk"
@@ -37,12 +44,11 @@ Required properties:
 Required properties for VDMA:
 - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
 
-Optional properties for AXI DMA:
+Optional properties for AXI DMA and MCDMA:
 - xlnx,sg-length-width: Should be set to the width in bits of the length
        register as configured in h/w. Takes values {8...26}. If the property
        is missing or invalid then the default value 23 is used. This is the
        maximum value that is supported by all IP versions.
-- xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
 Optional properties for VDMA:
 - xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
        It takes following values:
@@ -55,8 +61,8 @@ Required child node properties:
        For VDMA: It should be either "xlnx,axi-vdma-mm2s-channel" or
        "xlnx,axi-vdma-s2mm-channel".
        For CDMA: It should be "xlnx,axi-cdma-channel".
-       For AXIDMA: It should be either "xlnx,axi-dma-mm2s-channel" or
-       "xlnx,axi-dma-s2mm-channel".
+       For AXIDMA and MCDMA: It should be either "xlnx,axi-dma-mm2s-channel"
+       or "xlnx,axi-dma-s2mm-channel".
 - interrupts: Should contain per channel VDMA interrupts.
 - xlnx,datawidth: Should contain the stream data width, take values
        {32,64...1024}.
@@ -69,8 +75,8 @@ Optional child node properties for VDMA:
        enabled/disabled in hardware.
 - xlnx,enable-vert-flip: Tells vertical flip is
        enabled/disabled in hardware(S2MM path).
-Optional child node properties for AXI DMA:
--dma-channels: Number of dma channels in child node.
+Optional child node properties for MCDMA:
+- dma-channels: Number of dma channels in child node.
 
 Example:
 ++++++++
index 296de2b51c832ecc1869a77624c964ce398a5236..042bdcc3480b225e8b8f7d8b966d01d997790ec0 100644 (file)
@@ -14776,6 +14776,12 @@ F:     drivers/media/usb/siano/
 F:     drivers/media/usb/siano/
 F:     drivers/media/mmc/siano/
 
+SIFIVE PDMA DRIVER
+M:     Green Wan <green.wan@sifive.com>
+S:     Maintained
+F:     drivers/dma/sf-pdma/
+F:     Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+
 SIFIVE DRIVERS
 M:     Palmer Dabbelt <palmer@sifive.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
index 7af874b69ffb9e3ea2cfa1cbcc7c4b46be7a4a39..03dfee5c66d9237e539c89ab8376602a6d2b2d05 100644 (file)
@@ -342,6 +342,26 @@ config MCF_EDMA
          minimal intervention from a host processor.
          This module can be found on Freescale ColdFire mcf5441x SoCs.
 
+config MILBEAUT_HDMAC
+       tristate "Milbeaut AHB DMA support"
+       depends on ARCH_MILBEAUT || COMPILE_TEST
+       depends on OF
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Say yes here to support the Socionext Milbeaut
+         HDMAC device.
+
+config MILBEAUT_XDMAC
+       tristate "Milbeaut AXI DMA support"
+       depends on ARCH_MILBEAUT || COMPILE_TEST
+       depends on OF
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Say yes here to support the Socionext Milbeaut
+         XDMAC device.
+
 config MMP_PDMA
        bool "MMP PDMA support"
        depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
@@ -635,6 +655,10 @@ config XILINX_DMA
          destination address.
          AXI DMA engine provides high-bandwidth one dimensional direct
          memory access between memory and AXI4-Stream target peripherals.
+         AXI MCDMA engine provides high-bandwidth direct memory access
+         between memory and AXI4-Stream target peripherals. It provides
+         a scatter-gather interface and supports independent configuration
+         of multiple channels.
 
 config XILINX_ZYNQMP_DMA
        tristate "Xilinx ZynqMP DMA Engine"
@@ -665,10 +689,14 @@ source "drivers/dma/dw-edma/Kconfig"
 
 source "drivers/dma/hsu/Kconfig"
 
+source "drivers/dma/sf-pdma/Kconfig"
+
 source "drivers/dma/sh/Kconfig"
 
 source "drivers/dma/ti/Kconfig"
 
+source "drivers/dma/fsl-dpaa2-qdma/Kconfig"
+
 # clients
 comment "DMA Clients"
        depends on DMA_ENGINE
index f5ce8665e944e29151e30d040309623956d859c2..42d7e2fc64faf8a4ac7100f78e060a598eb11eec 100644 (file)
@@ -45,6 +45,8 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_LPC18XX_DMAMUX) += lpc18xx-dmamux.o
+obj-$(CONFIG_MILBEAUT_HDMAC) += milbeaut-hdmac.o
+obj-$(CONFIG_MILBEAUT_XDMAC) += milbeaut-xdmac.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
@@ -60,6 +62,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
 obj-$(CONFIG_PXA_DMA) += pxa_dma.o
 obj-$(CONFIG_RENESAS_DMA) += sh/
+obj-$(CONFIG_SF_PDMA) += sf-pdma/
 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_STM32_DMA) += stm32-dma.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_UNIPHIER_MDMAC) += uniphier-mdmac.o
 obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
 obj-$(CONFIG_ZX_DMA) += zx_dma.o
 obj-$(CONFIG_ST_FDMA) += st_fdma.o
+obj-$(CONFIG_FSL_DPAA2_QDMA) += fsl-dpaa2-qdma/
 
 obj-y += mediatek/
 obj-y += qcom/
index b58ac720d9a122099dd6f9c7e60f28cc215dc0dc..f71c9f77d40594e08f02642d73038ec1eadf4216 100644 (file)
@@ -1957,21 +1957,16 @@ static int atmel_xdmac_resume(struct device *dev)
 
 static int at_xdmac_probe(struct platform_device *pdev)
 {
-       struct resource *res;
        struct at_xdmac *atxdmac;
        int             irq, size, nr_channels, i, ret;
        void __iomem    *base;
        u32             reg;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -EINVAL;
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;
 
-       base = devm_ioremap_resource(&pdev->dev, res);
+       base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
index cafb1cc065bb76f397541f54bb597982dbc4ece4..fa626acdc9b961919208a316d719ad9d2f25118b 100644 (file)
@@ -858,13 +858,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
        jzdma->soc_data = soc_data;
        platform_set_drvdata(pdev, jzdma);
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(dev, "failed to get I/O memory\n");
-               return -EINVAL;
-       }
-
-       jzdma->chn_base = devm_ioremap_resource(dev, res);
+       jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(jzdma->chn_base))
                return PTR_ERR(jzdma->chn_base);
 
@@ -987,6 +981,7 @@ static int jz4780_dma_remove(struct platform_device *pdev)
 
        of_dma_controller_free(pdev->dev.of_node);
 
+       clk_disable_unprepare(jzdma->clk);
        free_irq(jzdma->irq, jzdma);
 
        for (i = 0; i < jzdma->soc_data->nb_channels; i++)
@@ -1019,11 +1014,18 @@ static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
        .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
 };
 
+static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
+       .nb_channels = 8,
+       .transfer_ord_max = 7,
+       .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
 static const struct of_device_id jz4780_dma_dt_match[] = {
        { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
        { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
        { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
        { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
+       { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
        {},
 };
 MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
index c90c798e5ec347f8b2ddf4dbb4a6bd650834d46b..0585d749d935c5377fde18fef387ff6838efca81 100644 (file)
@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev)
 
        data->chip = chip;
 
-       chip->clk = devm_clk_get(chip->dev, "hclk");
+       chip->clk = devm_clk_get_optional(chip->dev, "hclk");
        if (IS_ERR(chip->clk))
                return PTR_ERR(chip->clk);
        err = clk_prepare_enable(chip->clk);
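
A minimal sketch of the optional-clock pattern adopted above (illustrative, not from the patch): devm_clk_get_optional() returns NULL rather than an error when the firmware does not describe the clock, and clk_prepare_enable()/clk_disable_unprepare() are no-ops for a NULL clock, so the same probe path works with and without "hclk". The example_get_bus_clock() name is hypothetical.

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hypothetical probe fragment showing the optional-clock handling. */
static int example_get_bus_clock(struct platform_device *pdev, struct clk **out)
{
        struct clk *clk = devm_clk_get_optional(&pdev->dev, "hclk");

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        *out = clk;
        return clk_prepare_enable(clk); /* returns 0 when clk is NULL */
}
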
diff --git a/drivers/dma/fsl-dpaa2-qdma/Kconfig b/drivers/dma/fsl-dpaa2-qdma/Kconfig
new file mode 100644 (file)
index 0000000..258ed6b
--- /dev/null
@@ -0,0 +1,9 @@
+menuconfig FSL_DPAA2_QDMA
+       tristate "NXP DPAA2 QDMA"
+       depends on ARM64
+       depends on FSL_MC_BUS && FSL_MC_DPIO
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         NXP Data Path Acceleration Architecture 2 QDMA driver,
+         using the NXP MC bus driver.
diff --git a/drivers/dma/fsl-dpaa2-qdma/Makefile b/drivers/dma/fsl-dpaa2-qdma/Makefile
new file mode 100644 (file)
index 0000000..c1d0226
--- /dev/null
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the NXP DPAA2 qDMA controllers
+obj-$(CONFIG_FSL_DPAA2_QDMA) += dpaa2-qdma.o dpdmai.o
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
new file mode 100644 (file)
index 0000000..c70a796
--- /dev/null
@@ -0,0 +1,825 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/dmapool.h>
+#include <linux/of_irq.h>
+#include <linux/iommu.h>
+#include <linux/sys_soc.h>
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+
+#include "../virt-dma.h"
+#include "dpdmai.h"
+#include "dpaa2-qdma.h"
+
+static bool smmu_disable = true;
+
+static struct dpaa2_qdma_chan *to_dpaa2_qdma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct dpaa2_qdma_chan, vchan.chan);
+}
+
+static struct dpaa2_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct dpaa2_qdma_comp, vdesc);
+}
+
+static int dpaa2_qdma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+       struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+       struct device *dev = &dpaa2_qdma->priv->dpdmai_dev->dev;
+
+       dpaa2_chan->fd_pool = dma_pool_create("fd_pool", dev,
+                                             sizeof(struct dpaa2_fd),
+                                             sizeof(struct dpaa2_fd), 0);
+       if (!dpaa2_chan->fd_pool)
+               goto err;
+
+       dpaa2_chan->fl_pool = dma_pool_create("fl_pool", dev,
+                                             sizeof(struct dpaa2_fl_entry),
+                                             sizeof(struct dpaa2_fl_entry), 0);
+       if (!dpaa2_chan->fl_pool)
+               goto err_fd;
+
+       dpaa2_chan->sdd_pool =
+               dma_pool_create("sdd_pool", dev,
+                               sizeof(struct dpaa2_qdma_sd_d),
+                               sizeof(struct dpaa2_qdma_sd_d), 0);
+       if (!dpaa2_chan->sdd_pool)
+               goto err_fl;
+
+       return dpaa2_qdma->desc_allocated++;
+err_fl:
+       dma_pool_destroy(dpaa2_chan->fl_pool);
+err_fd:
+       dma_pool_destroy(dpaa2_chan->fd_pool);
+err:
+       return -ENOMEM;
+}
+
+static void dpaa2_qdma_free_chan_resources(struct dma_chan *chan)
+{
+       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+       struct dpaa2_qdma_engine *dpaa2_qdma = dpaa2_chan->qdma;
+       unsigned long flags;
+
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&dpaa2_chan->vchan.lock, flags);
+       vchan_get_all_descriptors(&dpaa2_chan->vchan, &head);
+       spin_unlock_irqrestore(&dpaa2_chan->vchan.lock, flags);
+
+       vchan_dma_desc_free_list(&dpaa2_chan->vchan, &head);
+
+       dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_used);
+       dpaa2_dpdmai_free_comp(dpaa2_chan, &dpaa2_chan->comp_free);
+
+       dma_pool_destroy(dpaa2_chan->fd_pool);
+       dma_pool_destroy(dpaa2_chan->fl_pool);
+       dma_pool_destroy(dpaa2_chan->sdd_pool);
+       dpaa2_qdma->desc_allocated--;
+}
+
+/*
+ * Request a command descriptor for enqueue.
+ */
+static struct dpaa2_qdma_comp *
+dpaa2_qdma_request_desc(struct dpaa2_qdma_chan *dpaa2_chan)
+{
+       struct dpaa2_qdma_priv *qdma_priv = dpaa2_chan->qdma->priv;
+       struct device *dev = &qdma_priv->dpdmai_dev->dev;
+       struct dpaa2_qdma_comp *comp_temp = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+       if (list_empty(&dpaa2_chan->comp_free)) {
+               spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+               comp_temp = kzalloc(sizeof(*comp_temp), GFP_NOWAIT);
+               if (!comp_temp)
+                       goto err;
+               comp_temp->fd_virt_addr =
+                       dma_pool_alloc(dpaa2_chan->fd_pool, GFP_NOWAIT,
+                                      &comp_temp->fd_bus_addr);
+               if (!comp_temp->fd_virt_addr)
+                       goto err_comp;
+
+               comp_temp->fl_virt_addr =
+                       dma_pool_alloc(dpaa2_chan->fl_pool, GFP_NOWAIT,
+                                      &comp_temp->fl_bus_addr);
+               if (!comp_temp->fl_virt_addr)
+                       goto err_fd_virt;
+
+               comp_temp->desc_virt_addr =
+                       dma_pool_alloc(dpaa2_chan->sdd_pool, GFP_NOWAIT,
+                                      &comp_temp->desc_bus_addr);
+               if (!comp_temp->desc_virt_addr)
+                       goto err_fl_virt;
+
+               comp_temp->qchan = dpaa2_chan;
+               return comp_temp;
+       }
+
+       comp_temp = list_first_entry(&dpaa2_chan->comp_free,
+                                    struct dpaa2_qdma_comp, list);
+       list_del(&comp_temp->list);
+       spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+
+       comp_temp->qchan = dpaa2_chan;
+
+       return comp_temp;
+
+err_fl_virt:
+       dma_pool_free(dpaa2_chan->fl_pool,
+                     comp_temp->fl_virt_addr,
+                     comp_temp->fl_bus_addr);
+err_fd_virt:
+       dma_pool_free(dpaa2_chan->fd_pool,
+                     comp_temp->fd_virt_addr,
+                     comp_temp->fd_bus_addr);
+err_comp:
+       kfree(comp_temp);
+err:
+       dev_err(dev, "Failed to request descriptor\n");
+       return NULL;
+}
+
+static void
+dpaa2_qdma_populate_fd(u32 format, struct dpaa2_qdma_comp *dpaa2_comp)
+{
+       struct dpaa2_fd *fd;
+
+       fd = dpaa2_comp->fd_virt_addr;
+       memset(fd, 0, sizeof(struct dpaa2_fd));
+
+       /* populate the frame descriptor */
+       dpaa2_fd_set_addr(fd, dpaa2_comp->fl_bus_addr);
+
+       /*
+        * Bypass memory translation, frame list format, short length disable;
+        * BMT must be disabled if fsl-mc uses an IOVA address.
+        */
+       if (smmu_disable)
+               dpaa2_fd_set_bpid(fd, QMAN_FD_BMT_ENABLE);
+       dpaa2_fd_set_format(fd, QMAN_FD_FMT_ENABLE | QMAN_FD_SL_DISABLE);
+
+       dpaa2_fd_set_frc(fd, format | QDMA_SER_CTX);
+}
+
+/* first frame list for descriptor buffer */
+static void
+dpaa2_qdma_populate_first_framel(struct dpaa2_fl_entry *f_list,
+                                struct dpaa2_qdma_comp *dpaa2_comp,
+                                bool wrt_changed)
+{
+       struct dpaa2_qdma_sd_d *sdd;
+
+       sdd = dpaa2_comp->desc_virt_addr;
+       memset(sdd, 0, 2 * (sizeof(*sdd)));
+
+       /* source descriptor CMD */
+       sdd->cmd = cpu_to_le32(QDMA_SD_CMD_RDTTYPE_COHERENT);
+       sdd++;
+
+       /* dest descriptor CMD */
+       if (wrt_changed)
+               sdd->cmd = cpu_to_le32(LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT);
+       else
+               sdd->cmd = cpu_to_le32(QDMA_DD_CMD_WRTTYPE_COHERENT);
+
+       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+       /* first frame list to source descriptor */
+       dpaa2_fl_set_addr(f_list, dpaa2_comp->desc_bus_addr);
+       dpaa2_fl_set_len(f_list, 0x20);
+       dpaa2_fl_set_format(f_list, QDMA_FL_FMT_SBF | QDMA_FL_SL_LONG);
+
+       /* bypass memory translation */
+       if (smmu_disable)
+               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+/* source and destination frame list */
+static void
+dpaa2_qdma_populate_frames(struct dpaa2_fl_entry *f_list,
+                          dma_addr_t dst, dma_addr_t src,
+                          size_t len, uint8_t fmt)
+{
+       /* source frame list to source buffer */
+       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+       dpaa2_fl_set_addr(f_list, src);
+       dpaa2_fl_set_len(f_list, len);
+
+       /* single buffer frame or scatter gather frame */
+       dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+
+       /* bypass memory translation */
+       if (smmu_disable)
+               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+
+       f_list++;
+
+       /* destination frame list to destination buffer */
+       memset(f_list, 0, sizeof(struct dpaa2_fl_entry));
+
+       dpaa2_fl_set_addr(f_list, dst);
+       dpaa2_fl_set_len(f_list, len);
+       dpaa2_fl_set_format(f_list, (fmt | QDMA_FL_SL_LONG));
+       /* single buffer frame or scatter gather frame */
+       dpaa2_fl_set_final(f_list, QDMA_FL_F);
+       /* bypass memory translation */
+       if (smmu_disable)
+               f_list->bpid = cpu_to_le16(QDMA_FL_BMT_ENABLE);
+}
+
+static struct dma_async_tx_descriptor
+*dpaa2_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+                       dma_addr_t src, size_t len, ulong flags)
+{
+       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+       struct dpaa2_qdma_engine *dpaa2_qdma;
+       struct dpaa2_qdma_comp *dpaa2_comp;
+       struct dpaa2_fl_entry *f_list;
+       bool wrt_changed;
+
+       dpaa2_qdma = dpaa2_chan->qdma;
+       dpaa2_comp = dpaa2_qdma_request_desc(dpaa2_chan);
+       if (!dpaa2_comp)
+               return NULL;
+
+       wrt_changed = (bool)dpaa2_qdma->qdma_wrtype_fixup;
+
+       /* populate Frame descriptor */
+       dpaa2_qdma_populate_fd(QDMA_FD_LONG_FORMAT, dpaa2_comp);
+
+       f_list = dpaa2_comp->fl_virt_addr;
+
+       /* first frame list for descriptor buffer (long format) */
+       dpaa2_qdma_populate_first_framel(f_list, dpaa2_comp, wrt_changed);
+
+       f_list++;
+
+       dpaa2_qdma_populate_frames(f_list, dst, src, len, QDMA_FL_FMT_SBF);
+
+       return vchan_tx_prep(&dpaa2_chan->vchan, &dpaa2_comp->vdesc, flags);
+}
+
+static void dpaa2_qdma_issue_pending(struct dma_chan *chan)
+{
+       struct dpaa2_qdma_chan *dpaa2_chan = to_dpaa2_qdma_chan(chan);
+       struct dpaa2_qdma_comp *dpaa2_comp;
+       struct virt_dma_desc *vdesc;
+       struct dpaa2_fd *fd;
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&dpaa2_chan->queue_lock, flags);
+       spin_lock(&dpaa2_chan->vchan.lock);
+       if (vchan_issue_pending(&dpaa2_chan->vchan)) {
+               vdesc = vchan_next_desc(&dpaa2_chan->vchan);
+               if (!vdesc)
+                       goto err_enqueue;
+               dpaa2_comp = to_fsl_qdma_comp(vdesc);
+
+               fd = dpaa2_comp->fd_virt_addr;
+
+               list_del(&vdesc->node);
+               list_add_tail(&dpaa2_comp->list, &dpaa2_chan->comp_used);
+
+               err = dpaa2_io_service_enqueue_fq(NULL, dpaa2_chan->fqid, fd);
+               if (err) {
+                       list_del(&dpaa2_comp->list);
+                       list_add_tail(&dpaa2_comp->list,
+                                     &dpaa2_chan->comp_free);
+               }
+       }
+err_enqueue:
+       spin_unlock(&dpaa2_chan->vchan.lock);
+       spin_unlock_irqrestore(&dpaa2_chan->queue_lock, flags);
+}
+
+static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv;
+       struct device *dev = &ls_dev->dev;
+       struct dpaa2_qdma_priv *priv;
+       u8 prio_def = DPDMAI_PRIO_NUM;
+       int err = -EINVAL;
+       int i;
+
+       priv = dev_get_drvdata(dev);
+
+       priv->dev = dev;
+       priv->dpqdma_id = ls_dev->obj_desc.id;
+
+       /* Get the handle for the DPDMAI this interface is associated with */
+       err = dpdmai_open(priv->mc_io, 0, priv->dpqdma_id, &ls_dev->mc_handle);
+       if (err) {
+               dev_err(dev, "dpdmai_open() failed\n");
+               return err;
+       }
+
+       dev_dbg(dev, "Opened dpdmai object successfully\n");
+
+       err = dpdmai_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+                                   &priv->dpdmai_attr);
+       if (err) {
+               dev_err(dev, "dpdmai_get_attributes() failed\n");
+               goto exit;
+       }
+
+       if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
+               dev_err(dev, "DPDMAI major version mismatch\n"
+                            "Found %u.%u, supported version is %u.%u\n",
+                               priv->dpdmai_attr.version.major,
+                               priv->dpdmai_attr.version.minor,
+                               DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+               goto exit;
+       }
+
+       if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
+               dev_err(dev, "DPDMAI minor version mismatch\n"
+                            "Found %u.%u, supported version is %u.%u\n",
+                               priv->dpdmai_attr.version.major,
+                               priv->dpdmai_attr.version.minor,
+                               DPDMAI_VER_MAJOR, DPDMAI_VER_MINOR);
+               goto exit;
+       }
+
+       priv->num_pairs = min(priv->dpdmai_attr.num_of_priorities, prio_def);
+       ppriv = kcalloc(priv->num_pairs, sizeof(*ppriv), GFP_KERNEL);
+       if (!ppriv) {
+               err = -ENOMEM;
+               goto exit;
+       }
+       priv->ppriv = ppriv;
+
+       for (i = 0; i < priv->num_pairs; i++) {
+               err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                         i, &priv->rx_queue_attr[i]);
+               if (err) {
+                       dev_err(dev, "dpdmai_get_rx_queue() failed\n");
+                       goto exit;
+               }
+               ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+
+               err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                         i, &priv->tx_fqid[i]);
+               if (err) {
+                       dev_err(dev, "dpdmai_get_tx_queue() failed\n");
+                       goto exit;
+               }
+               ppriv->req_fqid = priv->tx_fqid[i];
+               ppriv->prio = i;
+               ppriv->priv = priv;
+               ppriv++;
+       }
+
+       return 0;
+exit:
+       dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+       return err;
+}
+
+static void dpaa2_qdma_fqdan_cb(struct dpaa2_io_notification_ctx *ctx)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv = container_of(ctx,
+                       struct dpaa2_qdma_priv_per_prio, nctx);
+       struct dpaa2_qdma_comp *dpaa2_comp, *_comp_tmp;
+       struct dpaa2_qdma_priv *priv = ppriv->priv;
+       u32 n_chans = priv->dpaa2_qdma->n_chans;
+       struct dpaa2_qdma_chan *qchan;
+       const struct dpaa2_fd *fd_eq;
+       const struct dpaa2_fd *fd;
+       struct dpaa2_dq *dq;
+       int is_last = 0;
+       int found;
+       u8 status;
+       int err;
+       int i;
+
+       do {
+               err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+                                              ppriv->store);
+       } while (err);
+
+       while (!is_last) {
+               do {
+                       dq = dpaa2_io_store_next(ppriv->store, &is_last);
+               } while (!is_last && !dq);
+               if (!dq) {
+                       dev_err(priv->dev, "FQID returned no valid frames!\n");
+                       continue;
+               }
+
+               /* obtain FD and process the error */
+               fd = dpaa2_dq_fd(dq);
+
+               status = dpaa2_fd_get_ctrl(fd) & 0xff;
+               if (status)
+                       dev_err(priv->dev, "FD error occurred\n");
+               found = 0;
+               for (i = 0; i < n_chans; i++) {
+                       qchan = &priv->dpaa2_qdma->chans[i];
+                       spin_lock(&qchan->queue_lock);
+                       if (list_empty(&qchan->comp_used)) {
+                               spin_unlock(&qchan->queue_lock);
+                               continue;
+                       }
+                       list_for_each_entry_safe(dpaa2_comp, _comp_tmp,
+                                                &qchan->comp_used, list) {
+                               fd_eq = dpaa2_comp->fd_virt_addr;
+
+                               if (le64_to_cpu(fd_eq->simple.addr) ==
+                                   le64_to_cpu(fd->simple.addr)) {
+                                       spin_lock(&qchan->vchan.lock);
+                                       vchan_cookie_complete(
+                                               &dpaa2_comp->vdesc);
+                                       spin_unlock(&qchan->vchan.lock);
+                                       found = 1;
+                                       break;
+                               }
+                       }
+                       spin_unlock(&qchan->queue_lock);
+                       if (found)
+                               break;
+               }
+       }
+
+       dpaa2_io_service_rearm(NULL, ctx);
+}
+
+static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv;
+       struct device *dev = priv->dev;
+       int err = -EINVAL;
+       int i, num;
+
+       num = priv->num_pairs;
+       ppriv = priv->ppriv;
+       for (i = 0; i < num; i++) {
+               ppriv->nctx.is_cdan = 0;
+               ppriv->nctx.desired_cpu = DPAA2_IO_ANY_CPU;
+               ppriv->nctx.id = ppriv->rsp_fqid;
+               ppriv->nctx.cb = dpaa2_qdma_fqdan_cb;
+               err = dpaa2_io_service_register(NULL, &ppriv->nctx, dev);
+               if (err) {
+                       dev_err(dev, "Notification register failed\n");
+                       goto err_service;
+               }
+
+               ppriv->store =
+                       dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+               if (!ppriv->store) {
+                       dev_err(dev, "dpaa2_io_store_create() failed\n");
+                       goto err_store;
+               }
+
+               ppriv++;
+       }
+       return 0;
+
+err_store:
+       dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+err_service:
+       ppriv--;
+       while (ppriv >= priv->ppriv) {
+               dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+               dpaa2_io_store_destroy(ppriv->store);
+               ppriv--;
+       }
+       return err;
+}
+
+static void dpaa2_dpmai_store_free(struct dpaa2_qdma_priv *priv)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+       int i;
+
+       for (i = 0; i < priv->num_pairs; i++) {
+               dpaa2_io_store_destroy(ppriv->store);
+               ppriv++;
+       }
+}
+
+static void dpaa2_dpdmai_dpio_free(struct dpaa2_qdma_priv *priv)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+       struct device *dev = priv->dev;
+       int i;
+
+       for (i = 0; i < priv->num_pairs; i++) {
+               dpaa2_io_service_deregister(NULL, &ppriv->nctx, dev);
+               ppriv++;
+       }
+}
+
+static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
+{
+       struct dpdmai_rx_queue_cfg rx_queue_cfg;
+       struct dpaa2_qdma_priv_per_prio *ppriv;
+       struct device *dev = priv->dev;
+       struct fsl_mc_device *ls_dev;
+       int i, num;
+       int err;
+
+       ls_dev = to_fsl_mc_device(dev);
+       num = priv->num_pairs;
+       ppriv = priv->ppriv;
+       for (i = 0; i < num; i++) {
+               rx_queue_cfg.options = DPDMAI_QUEUE_OPT_USER_CTX |
+                                       DPDMAI_QUEUE_OPT_DEST;
+               rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+               rx_queue_cfg.dest_cfg.dest_type = DPDMAI_DEST_DPIO;
+               rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+               rx_queue_cfg.dest_cfg.priority = ppriv->prio;
+               err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
+                                         rx_queue_cfg.dest_cfg.priority,
+                                         &rx_queue_cfg);
+               if (err) {
+                       dev_err(dev, "dpdmai_set_rx_queue() failed\n");
+                       return err;
+               }
+
+               ppriv++;
+       }
+
+       return 0;
+}
+
+static int __cold dpaa2_dpdmai_dpio_unbind(struct dpaa2_qdma_priv *priv)
+{
+       struct dpaa2_qdma_priv_per_prio *ppriv = priv->ppriv;
+       struct device *dev = priv->dev;
+       struct fsl_mc_device *ls_dev;
+       int err = 0;
+       int i;
+
+       ls_dev = to_fsl_mc_device(dev);
+
+       for (i = 0; i < priv->num_pairs; i++) {
+               ppriv->nctx.qman64 = 0;
+               ppriv->nctx.dpio_id = 0;
+               ppriv++;
+       }
+
+       err = dpdmai_reset(priv->mc_io, 0, ls_dev->mc_handle);
+       if (err)
+               dev_err(dev, "dpdmai_reset() failed\n");
+
+       return err;
+}
+
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+                                  struct list_head *head)
+{
+       struct dpaa2_qdma_comp *comp_tmp, *_comp_tmp;
+       unsigned long flags;
+
+       list_for_each_entry_safe(comp_tmp, _comp_tmp,
+                                head, list) {
+               spin_lock_irqsave(&qchan->queue_lock, flags);
+               list_del(&comp_tmp->list);
+               spin_unlock_irqrestore(&qchan->queue_lock, flags);
+               dma_pool_free(qchan->fd_pool,
+                             comp_tmp->fd_virt_addr,
+                             comp_tmp->fd_bus_addr);
+               dma_pool_free(qchan->fl_pool,
+                             comp_tmp->fl_virt_addr,
+                             comp_tmp->fl_bus_addr);
+               dma_pool_free(qchan->sdd_pool,
+                             comp_tmp->desc_virt_addr,
+                             comp_tmp->desc_bus_addr);
+               kfree(comp_tmp);
+       }
+}
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+       struct dpaa2_qdma_chan *qchan;
+       int num, i;
+
+       num = dpaa2_qdma->n_chans;
+       for (i = 0; i < num; i++) {
+               qchan = &dpaa2_qdma->chans[i];
+               dpaa2_dpdmai_free_comp(qchan, &qchan->comp_used);
+               dpaa2_dpdmai_free_comp(qchan, &qchan->comp_free);
+               dma_pool_destroy(qchan->fd_pool);
+               dma_pool_destroy(qchan->fl_pool);
+               dma_pool_destroy(qchan->sdd_pool);
+       }
+}
+
+static void dpaa2_qdma_free_desc(struct virt_dma_desc *vdesc)
+{
+       struct dpaa2_qdma_comp *dpaa2_comp;
+       struct dpaa2_qdma_chan *qchan;
+       unsigned long flags;
+
+       dpaa2_comp = to_fsl_qdma_comp(vdesc);
+       qchan = dpaa2_comp->qchan;
+       spin_lock_irqsave(&qchan->queue_lock, flags);
+       list_del(&dpaa2_comp->list);
+       list_add_tail(&dpaa2_comp->list, &qchan->comp_free);
+       spin_unlock_irqrestore(&qchan->queue_lock, flags);
+}
+
+static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
+{
+       struct dpaa2_qdma_priv *priv = dpaa2_qdma->priv;
+       struct dpaa2_qdma_chan *dpaa2_chan;
+       int num = priv->num_pairs;
+       int i;
+
+       INIT_LIST_HEAD(&dpaa2_qdma->dma_dev.channels);
+       for (i = 0; i < dpaa2_qdma->n_chans; i++) {
+               dpaa2_chan = &dpaa2_qdma->chans[i];
+               dpaa2_chan->qdma = dpaa2_qdma;
+               dpaa2_chan->fqid = priv->tx_fqid[i % num];
+               dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
+               vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
+               spin_lock_init(&dpaa2_chan->queue_lock);
+               INIT_LIST_HEAD(&dpaa2_chan->comp_used);
+               INIT_LIST_HEAD(&dpaa2_chan->comp_free);
+       }
+       return 0;
+}
+
+static int dpaa2_qdma_probe(struct fsl_mc_device *dpdmai_dev)
+{
+       struct device *dev = &dpdmai_dev->dev;
+       struct dpaa2_qdma_engine *dpaa2_qdma;
+       struct dpaa2_qdma_priv *priv;
+       int err;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       dev_set_drvdata(dev, priv);
+       priv->dpdmai_dev = dpdmai_dev;
+
+       priv->iommu_domain = iommu_get_domain_for_dev(dev);
+       if (priv->iommu_domain)
+               smmu_disable = false;
+
+       /* obtain a MC portal */
+       err = fsl_mc_portal_allocate(dpdmai_dev, 0, &priv->mc_io);
+       if (err) {
+               if (err == -ENXIO)
+                       err = -EPROBE_DEFER;
+               else
+                       dev_err(dev, "MC portal allocation failed\n");
+               goto err_mcportal;
+       }
+
+       /* DPDMAI initialization */
+       err = dpaa2_qdma_setup(dpdmai_dev);
+       if (err) {
+               dev_err(dev, "dpaa2_dpdmai_setup() failed\n");
+               goto err_dpdmai_setup;
+       }
+
+       /* DPIO */
+       err = dpaa2_qdma_dpio_setup(priv);
+       if (err) {
+               dev_err(dev, "dpaa2_dpdmai_dpio_setup() failed\n");
+               goto err_dpio_setup;
+       }
+
+       /* DPDMAI binding to DPIO */
+       err = dpaa2_dpdmai_bind(priv);
+       if (err) {
+               dev_err(dev, "dpaa2_dpdmai_bind() failed\n");
+               goto err_bind;
+       }
+
+       /* DPDMAI enable */
+       err = dpdmai_enable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+       if (err) {
+               dev_err(dev, "dpdmai_enable() failed\n");
+               goto err_enable;
+       }
+
+       dpaa2_qdma = kzalloc(sizeof(*dpaa2_qdma), GFP_KERNEL);
+       if (!dpaa2_qdma) {
+               err = -ENOMEM;
+               goto err_eng;
+       }
+
+       priv->dpaa2_qdma = dpaa2_qdma;
+       dpaa2_qdma->priv = priv;
+
+       dpaa2_qdma->desc_allocated = 0;
+       dpaa2_qdma->n_chans = NUM_CH;
+
+       dpaa2_dpdmai_init_channels(dpaa2_qdma);
+
+       if (soc_device_match(soc_fixup_tuning))
+               dpaa2_qdma->qdma_wrtype_fixup = true;
+       else
+               dpaa2_qdma->qdma_wrtype_fixup = false;
+
+       dma_cap_set(DMA_PRIVATE, dpaa2_qdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_SLAVE, dpaa2_qdma->dma_dev.cap_mask);
+       dma_cap_set(DMA_MEMCPY, dpaa2_qdma->dma_dev.cap_mask);
+
+       dpaa2_qdma->dma_dev.dev = dev;
+       dpaa2_qdma->dma_dev.device_alloc_chan_resources =
+               dpaa2_qdma_alloc_chan_resources;
+       dpaa2_qdma->dma_dev.device_free_chan_resources =
+               dpaa2_qdma_free_chan_resources;
+       dpaa2_qdma->dma_dev.device_tx_status = dma_cookie_status;
+       dpaa2_qdma->dma_dev.device_prep_dma_memcpy = dpaa2_qdma_prep_memcpy;
+       dpaa2_qdma->dma_dev.device_issue_pending = dpaa2_qdma_issue_pending;
+
+       err = dma_async_device_register(&dpaa2_qdma->dma_dev);
+       if (err) {
+               dev_err(dev, "Can't register NXP QDMA engine.\n");
+               goto err_dpaa2_qdma;
+       }
+
+       return 0;
+
+err_dpaa2_qdma:
+       kfree(dpaa2_qdma);
+err_eng:
+       dpdmai_disable(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_enable:
+       dpaa2_dpdmai_dpio_unbind(priv);
+err_bind:
+       dpaa2_dpmai_store_free(priv);
+       dpaa2_dpdmai_dpio_free(priv);
+err_dpio_setup:
+       kfree(priv->ppriv);
+       dpdmai_close(priv->mc_io, 0, dpdmai_dev->mc_handle);
+err_dpdmai_setup:
+       fsl_mc_portal_free(priv->mc_io);
+err_mcportal:
+       kfree(priv);
+       dev_set_drvdata(dev, NULL);
+       return err;
+}
+
+static int dpaa2_qdma_remove(struct fsl_mc_device *ls_dev)
+{
+       struct dpaa2_qdma_engine *dpaa2_qdma;
+       struct dpaa2_qdma_priv *priv;
+       struct device *dev;
+
+       dev = &ls_dev->dev;
+       priv = dev_get_drvdata(dev);
+       dpaa2_qdma = priv->dpaa2_qdma;
+
+       dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
+       dpaa2_dpdmai_dpio_unbind(priv);
+       dpaa2_dpmai_store_free(priv);
+       dpaa2_dpdmai_dpio_free(priv);
+       dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
+       fsl_mc_portal_free(priv->mc_io);
+       dev_set_drvdata(dev, NULL);
+       dpaa2_dpdmai_free_channels(dpaa2_qdma);
+
+       dma_async_device_unregister(&dpaa2_qdma->dma_dev);
+       kfree(priv);
+       kfree(dpaa2_qdma);
+
+       return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
+       {
+               .vendor = FSL_MC_VENDOR_FREESCALE,
+               .obj_type = "dpdmai",
+       },
+       { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_qdma_driver = {
+       .driver         = {
+               .name   = "dpaa2-qdma",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = dpaa2_qdma_probe,
+       .remove         = dpaa2_qdma_remove,
+       .match_id_table = dpaa2_qdma_id_table
+};
+
+static int __init dpaa2_qdma_driver_init(void)
+{
+       return fsl_mc_driver_register(&(dpaa2_qdma_driver));
+}
+late_initcall(dpaa2_qdma_driver_init);
+
+static void __exit fsl_qdma_exit(void)
+{
+       fsl_mc_driver_unregister(&(dpaa2_qdma_driver));
+}
+module_exit(fsl_qdma_exit);
+
+MODULE_ALIAS("platform:fsl-dpaa2-qdma");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("NXP Layerscape DPAA2 qDMA engine driver");
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.h
new file mode 100644 (file)
index 0000000..7d57184
--- /dev/null
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __DPAA2_QDMA_H
+#define __DPAA2_QDMA_H
+
+#define DPAA2_QDMA_STORE_SIZE 16
+#define NUM_CH 8
+
+struct dpaa2_qdma_sd_d {
+       u32 rsv:32;
+       union {
+               struct {
+                       u32 ssd:12; /* source stride distance */
+                       u32 sss:12; /* source stride size */
+                       u32 rsv1:8;
+               } sdf;
+               struct {
+                       u32 dsd:12; /* Destination stride distance */
+                       u32 dss:12; /* Destination stride size */
+                       u32 rsv2:8;
+               } ddf;
+       } df;
+       u32 rbpcmd;     /* Route-by-port command */
+       u32 cmd;
+} __attribute__((__packed__));
+
+/* Source descriptor command read transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_SD_CMD_RDTTYPE_COHERENT (0xb << 28)
+/* Destination descriptor command write transaction type for RBP=0: */
+/* coherent copy of cacheable memory */
+#define QDMA_DD_CMD_WRTTYPE_COHERENT (0x6 << 28)
+#define LX2160_QDMA_DD_CMD_WRTTYPE_COHERENT (0xb << 28)
+
+#define QMAN_FD_FMT_ENABLE     BIT(0) /* frame list table enable */
+#define QMAN_FD_BMT_ENABLE     BIT(15) /* bypass memory translation */
+#define QMAN_FD_BMT_DISABLE    (0) /* bypass memory translation */
+#define QMAN_FD_SL_DISABLE     (0) /* short length disabled */
+#define QMAN_FD_SL_ENABLE      BIT(14) /* short length enabled */
+
+#define QDMA_FINAL_BIT_DISABLE (0) /* final bit disable */
+#define QDMA_FINAL_BIT_ENABLE  BIT(31) /* final bit enable */
+
+#define QDMA_FD_SHORT_FORMAT   BIT(11) /* short format */
+#define QDMA_FD_LONG_FORMAT    (0) /* long format */
+#define QDMA_SER_DISABLE       (8) /* no notification */
+#define QDMA_SER_CTX           BIT(8) /* notification by FQD_CTX[fqid] */
+#define QDMA_SER_DEST          (2 << 8) /* notification by destination desc */
+#define QDMA_SER_BOTH          (3 << 8) /* source and dest notification */
+#define QDMA_FD_SPF_ENALBE     BIT(30) /* source prefetch enable */
+
+#define QMAN_FD_VA_ENABLE      BIT(14) /* Address used is virtual address */
+#define QMAN_FD_VA_DISABLE     (0)/* Address used is a real address */
+/* Flow Context: 49bit physical address */
+#define QMAN_FD_CBMT_ENABLE    BIT(15)
+#define QMAN_FD_CBMT_DISABLE   (0) /* Flow Context: 64bit virtual address */
+#define QMAN_FD_SC_DISABLE     (0) /* stashing control */
+
+#define QDMA_FL_FMT_SBF                (0x0) /* Single buffer frame */
+#define QDMA_FL_FMT_SGE                (0x2) /* Scatter gather frame */
+#define QDMA_FL_BMT_ENABLE     BIT(15) /* enable bypass memory translation */
+#define QDMA_FL_BMT_DISABLE    (0x0) /* enable bypass memory translation */
+#define QDMA_FL_SL_LONG                (0x0)/* long length */
+#define QDMA_FL_SL_SHORT       (0x1) /* short length */
+#define QDMA_FL_F              (0x1)/* last frame list bit */
+
+/* Description of the frame list table structure */
+struct dpaa2_qdma_chan {
+       struct dpaa2_qdma_engine        *qdma;
+       struct virt_dma_chan            vchan;
+       struct virt_dma_desc            vdesc;
+       enum dma_status                 status;
+       u32                             fqid;
+
+       /* spinlock used by dpaa2 qdma driver */
+       spinlock_t                      queue_lock;
+       struct dma_pool                 *fd_pool;
+       struct dma_pool                 *fl_pool;
+       struct dma_pool                 *sdd_pool;
+
+       struct list_head                comp_used;
+       struct list_head                comp_free;
+
+};
+
+struct dpaa2_qdma_comp {
+       dma_addr_t              fd_bus_addr;
+       dma_addr_t              fl_bus_addr;
+       dma_addr_t              desc_bus_addr;
+       struct dpaa2_fd         *fd_virt_addr;
+       struct dpaa2_fl_entry   *fl_virt_addr;
+       struct dpaa2_qdma_sd_d  *desc_virt_addr;
+       struct dpaa2_qdma_chan  *qchan;
+       struct virt_dma_desc    vdesc;
+       struct list_head        list;
+};
+
+struct dpaa2_qdma_engine {
+       struct dma_device       dma_dev;
+       u32                     n_chans;
+       struct dpaa2_qdma_chan  chans[NUM_CH];
+       int                     qdma_wrtype_fixup;
+       int                     desc_allocated;
+
+       struct dpaa2_qdma_priv *priv;
+};
+
+/*
+ * dpaa2_qdma_priv - driver private data
+ */
+struct dpaa2_qdma_priv {
+       int dpqdma_id;
+
+       struct iommu_domain     *iommu_domain;
+       struct dpdmai_attr      dpdmai_attr;
+       struct device           *dev;
+       struct fsl_mc_io        *mc_io;
+       struct fsl_mc_device    *dpdmai_dev;
+       u8                      num_pairs;
+
+       struct dpaa2_qdma_engine        *dpaa2_qdma;
+       struct dpaa2_qdma_priv_per_prio *ppriv;
+
+       struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM];
+       u32 tx_fqid[DPDMAI_PRIO_NUM];
+};
+
+struct dpaa2_qdma_priv_per_prio {
+       int req_fqid;
+       int rsp_fqid;
+       int prio;
+
+       struct dpaa2_io_store *store;
+       struct dpaa2_io_notification_ctx nctx;
+
+       struct dpaa2_qdma_priv *priv;
+};
+
+static struct soc_device_attribute soc_fixup_tuning[] = {
+       { .family = "QorIQ LX2160A"},
+       { },
+};
+
+/* FD pool size: one FD + 3 Frame list + 2 source/destination descriptor */
+#define FD_POOL_SIZE (sizeof(struct dpaa2_fd) + \
+               sizeof(struct dpaa2_fl_entry) * 3 + \
+               sizeof(struct dpaa2_qdma_sd_d) * 2)
+
+static void dpaa2_dpdmai_free_channels(struct dpaa2_qdma_engine *dpaa2_qdma);
+static void dpaa2_dpdmai_free_comp(struct dpaa2_qdma_chan *qchan,
+                                  struct list_head *head);
+#endif /* __DPAA2_QDMA_H */
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.c b/drivers/dma/fsl-dpaa2-qdma/dpdmai.c
new file mode 100644 (file)
index 0000000..f8d2211
--- /dev/null
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2019 NXP
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/fsl/mc.h>
+#include "dpdmai.h"
+
+struct dpdmai_rsp_get_attributes {
+       __le32 id;
+       u8 num_of_priorities;
+       u8 pad0[3];
+       __le16 major;
+       __le16 minor;
+};
+
+struct dpdmai_cmd_queue {
+       __le32 dest_id;
+       u8 priority;
+       u8 queue;
+       u8 dest_type;
+       u8 pad;
+       __le64 user_ctx;
+       union {
+               __le32 options;
+               __le32 fqid;
+       };
+};
+
+struct dpdmai_rsp_get_tx_queue {
+       __le64 pad;
+       __le32 fqid;
+};
+
+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
+       ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPDMAI_CMD_CREATE(cmd, cfg) \
+do { \
+       MC_CMD_OP(cmd, 0, 8,  8,  u8,  (cfg)->priorities[0]);\
+       MC_CMD_OP(cmd, 0, 16, 8,  u8,  (cfg)->priorities[1]);\
+} while (0)
+
+static inline u64 mc_enc(int lsoffset, int width, u64 val)
+{
+       return (val & MAKE_UMASK64(width)) << lsoffset;
+}
+
+/**
+ * dpdmai_open() - Open a control session for the specified object
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmai_id: DPDMAI unique ID
+ * @token:     Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmai_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+               int dpdmai_id, u16 *token)
+{
+       struct fsl_mc_command cmd = { 0 };
+       __le64 *cmd_dpdmai_id;
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
+                                         cmd_flags, 0);
+
+       cmd_dpdmai_id = cmd.params;
+       *cmd_dpdmai_id = cpu_to_le32(dpdmai_id);
+
+       /* send command to mc*/
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       *token = mc_cmd_hdr_read_token(&cmd);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_open);
+
+/**
+ * dpdmai_close() - Close the control session of the object
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CLOSE,
+                                         cmd_flags, token);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_close);
+
+/**
+ * dpdmai_create() - Create the DPDMAI object
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg:       Configuration structure
+ * @token:     Returned token; use in subsequent API calls
+ *
+ * Create the DPDMAI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent calls to
+ * this specific object. For objects that are created using the
+ * DPL file, call dpdmai_open() function to get an authentication
+ * token first.
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                 const struct dpdmai_cfg *cfg, u16 *token)
+{
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
+                                         cmd_flags, 0);
+       DPDMAI_CMD_CREATE(cmd, cfg);
+
+       /* send command to mc */
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       *token = mc_cmd_hdr_read_token(&cmd);
+
+       return 0;
+}
+
+/**
+ * dpdmai_enable() - Enable the DPDMAI, allow sending and receiving frames.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_ENABLE,
+                                         cmd_flags, token);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_enable);
+
+/**
+ * dpdmai_disable() - Disable the DPDMAI, stop sending and receiving frames.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DISABLE,
+                                         cmd_flags, token);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_disable);
+
+/**
+ * dpdmai_reset() - Reset the DPDMAI, returns the object to initial state.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_RESET,
+                                         cmd_flags, token);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_reset);
+
+/**
+ * dpdmai_get_attributes() - Retrieve DPDMAI attributes.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ * @attr:      Returned object's attributes
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                         u16 token, struct dpdmai_attr *attr)
+{
+       struct dpdmai_rsp_get_attributes *rsp_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_ATTR,
+                                         cmd_flags, token);
+
+       /* send command to mc */
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       rsp_params = (struct dpdmai_rsp_get_attributes *)cmd.params;
+       attr->id = le32_to_cpu(rsp_params->id);
+       attr->version.major = le16_to_cpu(rsp_params->major);
+       attr->version.minor = le16_to_cpu(rsp_params->minor);
+       attr->num_of_priorities = rsp_params->num_of_priorities;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
+
+/**
+ * dpdmai_set_rx_queue() - Set Rx queue configuration
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ * @priority:  Select the queue relative to the number of
+ *             priorities configured at DPDMAI creation
+ * @cfg:       Rx queue configuration
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                       u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
+{
+       struct dpdmai_cmd_queue *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_SET_RX_QUEUE,
+                                         cmd_flags, token);
+
+       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+       cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+       cmd_params->priority = cfg->dest_cfg.priority;
+       cmd_params->queue = priority;
+       cmd_params->dest_type = cfg->dest_cfg.dest_type;
+       cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+       cmd_params->options = cpu_to_le32(cfg->options);
+
+       /* send command to mc */
+       return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
+
+/**
+ * dpdmai_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ * @priority:  Select the queue relative to the number of
+ *             priorities configured at DPDMAI creation
+ * @attr:      Returned Rx queue attributes
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                       u8 priority, struct dpdmai_rx_queue_attr *attr)
+{
+       struct dpdmai_cmd_queue *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_RX_QUEUE,
+                                         cmd_flags, token);
+
+       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+       cmd_params->queue = priority;
+
+       /* send command to mc */
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+       attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+       attr->dest_cfg.priority = cmd_params->priority;
+       attr->dest_cfg.dest_type = cmd_params->dest_type;
+       attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+       attr->fqid = le32_to_cpu(cmd_params->fqid);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
+
+/**
+ * dpdmai_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io:     Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token:     Token of DPDMAI object
+ * @priority:  Select the queue relative to the number of
+ *             priorities configured at DPDMAI creation
+ * @fqid:      Returned Tx queue frame queue ID (FQID)
+ *
+ * Return:     '0' on Success; Error code otherwise.
+ */
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                       u16 token, u8 priority, u32 *fqid)
+{
+       struct dpdmai_rsp_get_tx_queue *rsp_params;
+       struct dpdmai_cmd_queue *cmd_params;
+       struct fsl_mc_command cmd = { 0 };
+       int err;
+
+       /* prepare command */
+       cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_GET_TX_QUEUE,
+                                         cmd_flags, token);
+
+       cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
+       cmd_params->queue = priority;
+
+       /* send command to mc */
+       err = mc_send_command(mc_io, &cmd);
+       if (err)
+               return err;
+
+       /* retrieve response parameters */
+
+       rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
+       *fqid = le32_to_cpu(rsp_params->fqid);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(dpdmai_get_tx_queue);
+
+MODULE_LICENSE("GPL v2");
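For context, these MC commands are normally issued in a fixed order when a DPDMAI object is brought up. The following is a minimal, hypothetical sketch of that sequence, not part of the patch; error handling is trimmed, and the mc_io portal and dpdmai_id are assumed to come from the fsl-mc bus probe.

/* Hypothetical usage sketch -- not part of this patch. */
static int example_dpdmai_setup(struct fsl_mc_io *mc_io, int dpdmai_id)
{
	struct dpdmai_rx_queue_attr rx_attr;
	struct dpdmai_attr attr;
	u32 tx_fqid;
	u16 token;
	int err;

	err = dpdmai_open(mc_io, 0, dpdmai_id, &token);
	if (err)
		return err;

	/* attr.num_of_priorities tells how many queue pairs exist */
	err = dpdmai_get_attributes(mc_io, 0, token, &attr);
	if (err)
		goto out_close;

	/* Query the request/response frame queues for priority 0 */
	err = dpdmai_get_rx_queue(mc_io, 0, token, 0, &rx_attr);
	if (err)
		goto out_close;

	err = dpdmai_get_tx_queue(mc_io, 0, token, 0, &tx_fqid);
	if (err)
		goto out_close;

	return dpdmai_enable(mc_io, 0, token);

out_close:
	dpdmai_close(mc_io, 0, token);
	return err;
}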
diff --git a/drivers/dma/fsl-dpaa2-qdma/dpdmai.h b/drivers/dma/fsl-dpaa2-qdma/dpdmai.h
new file mode 100644 (file)
index 0000000..6d78509
--- /dev/null
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 NXP */
+
+#ifndef __FSL_DPDMAI_H
+#define __FSL_DPDMAI_H
+
+/* DPDMAI Version */
+#define DPDMAI_VER_MAJOR       2
+#define DPDMAI_VER_MINOR       2
+
+#define DPDMAI_CMD_BASE_VERSION        0
+#define DPDMAI_CMD_ID_OFFSET   4
+
+#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \
+                               DPDMAI_CMD_BASE_VERSION)
+
+/* Command IDs */
+#define DPDMAI_CMDID_CLOSE             DPDMAI_CMDID_FORMAT(0x800)
+#define DPDMAI_CMDID_OPEN               DPDMAI_CMDID_FORMAT(0x80E)
+#define DPDMAI_CMDID_CREATE             DPDMAI_CMDID_FORMAT(0x90E)
+
+#define DPDMAI_CMDID_ENABLE             DPDMAI_CMDID_FORMAT(0x002)
+#define DPDMAI_CMDID_DISABLE            DPDMAI_CMDID_FORMAT(0x003)
+#define DPDMAI_CMDID_GET_ATTR           DPDMAI_CMDID_FORMAT(0x004)
+#define DPDMAI_CMDID_RESET              DPDMAI_CMDID_FORMAT(0x005)
+#define DPDMAI_CMDID_IS_ENABLED         DPDMAI_CMDID_FORMAT(0x006)
+
+#define DPDMAI_CMDID_SET_IRQ            DPDMAI_CMDID_FORMAT(0x010)
+#define DPDMAI_CMDID_GET_IRQ            DPDMAI_CMDID_FORMAT(0x011)
+#define DPDMAI_CMDID_SET_IRQ_ENABLE     DPDMAI_CMDID_FORMAT(0x012)
+#define DPDMAI_CMDID_GET_IRQ_ENABLE     DPDMAI_CMDID_FORMAT(0x013)
+#define DPDMAI_CMDID_SET_IRQ_MASK       DPDMAI_CMDID_FORMAT(0x014)
+#define DPDMAI_CMDID_GET_IRQ_MASK       DPDMAI_CMDID_FORMAT(0x015)
+#define DPDMAI_CMDID_GET_IRQ_STATUS     DPDMAI_CMDID_FORMAT(0x016)
+#define DPDMAI_CMDID_CLEAR_IRQ_STATUS  DPDMAI_CMDID_FORMAT(0x017)
+
+#define DPDMAI_CMDID_SET_RX_QUEUE      DPDMAI_CMDID_FORMAT(0x1A0)
+#define DPDMAI_CMDID_GET_RX_QUEUE       DPDMAI_CMDID_FORMAT(0x1A1)
+#define DPDMAI_CMDID_GET_TX_QUEUE       DPDMAI_CMDID_FORMAT(0x1A2)
+
+#define MC_CMD_HDR_TOKEN_O 32  /* Token field offset */
+#define MC_CMD_HDR_TOKEN_S 16  /* Token field size */
+
+#define MAKE_UMASK64(_width) \
+       ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : (u64)-1))
+
+/* Data Path DMA Interface API
+ * Contains initialization APIs and runtime control APIs for DPDMAI
+ */
+
+/**
+ * Maximum number of Tx/Rx priorities per DPDMAI object
+ */
+#define DPDMAI_PRIO_NUM                2
+
+/* DPDMAI queue modification options */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPDMAI_QUEUE_OPT_USER_CTX      0x1
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPDMAI_QUEUE_OPT_DEST          0x2
+
+/**
+ * struct dpdmai_cfg - Structure representing DPDMAI configuration
+ * @priorities: Priorities for the DMA hardware processing; valid priorities are
+ *     configured with values 1-8; the entry following the last valid entry
+ *     should be configured with 0
+ */
+struct dpdmai_cfg {
+       u8 priorities[DPDMAI_PRIO_NUM];
+};
+
+/**
+ * struct dpdmai_attr - Structure representing DPDMAI attributes
+ * @id: DPDMAI object ID
+ * @version: DPDMAI version
+ * @num_of_priorities: number of priorities
+ */
+struct dpdmai_attr {
+       int     id;
+       /**
+        * struct version - DPDMAI version
+        * @major: DPDMAI major version
+        * @minor: DPDMAI minor version
+        */
+       struct {
+               u16 major;
+               u16 minor;
+       } version;
+       u8 num_of_priorities;
+};
+
+/**
+ * enum dpdmai_dest - DPDMAI destination types
+ * @DPDMAI_DEST_NONE: Unassigned destination; the queue is set in parked mode
+ *     and does not generate FQDAN notifications; user is expected to dequeue
+ *     from the queue based on polling or other user-defined method
+ * @DPDMAI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ *     notifications to the specified DPIO; user is expected to dequeue
+ *     from the queue only after notification is received
+ * @DPDMAI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ *     FQDAN notifications, but is connected to the specified DPCON object;
+ *     user is expected to dequeue from the DPCON channel
+ */
+enum dpdmai_dest {
+       DPDMAI_DEST_NONE = 0,
+       DPDMAI_DEST_DPIO = 1,
+       DPDMAI_DEST_DPCON = 2
+};
+
+/**
+ * struct dpdmai_dest_cfg - Structure representing DPDMAI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ *     are 0-1 or 0-7, depending on the number of priorities in that
+ *     channel; not relevant for 'DPDMAI_DEST_NONE' option
+ */
+struct dpdmai_dest_cfg {
+       enum dpdmai_dest dest_type;
+       int dest_id;
+       u8 priority;
+};
+
+/**
+ * struct dpdmai_rx_queue_cfg - DPDMAI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ *     Use any combination of 'DPDMAI_QUEUE_OPT_<X>' flags
+ * @user_ctx: User context value provided in the frame descriptor of each
+ *     dequeued frame;
+ *     valid only if 'DPDMAI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ *     valid only if 'DPDMAI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpdmai_rx_queue_cfg {
+       struct dpdmai_dest_cfg dest_cfg;
+       u32 options;
+       u64 user_ctx;
+
+};
+
+/**
+ * struct dpdmai_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx:  User context value provided in the frame descriptor of each
+ *      dequeued frame
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpdmai_rx_queue_attr {
+       struct dpdmai_dest_cfg  dest_cfg;
+       u64 user_ctx;
+       u32 fqid;
+};
+
+int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
+               int dpdmai_id, u16 *token);
+int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                 const struct dpdmai_cfg *cfg, u16 *token);
+int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                         u16 token, struct dpdmai_attr *attr);
+int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                       u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
+int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+                       u8 priority, struct dpdmai_rx_queue_attr *attr);
+int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
+                       u16 token, u8 priority, u32 *fqid);
+
+#endif /* __FSL_DPDMAI_H */
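To illustrate how the queue-modification flags and destination types above combine, a hypothetical configuration that routes a priority-0 Rx queue to a DPIO (so it generates FQDAN notifications) and attaches a user context could look like the snippet below; dpio_id, ctx, mc_io and token are placeholders, not values from the patch.

/* Hypothetical example -- placeholder values, not part of this patch. */
struct dpdmai_rx_queue_cfg rx_cfg = {
	.options  = DPDMAI_QUEUE_OPT_USER_CTX | DPDMAI_QUEUE_OPT_DEST,
	.user_ctx = ctx,                        /* returned in each dequeued FD */
	.dest_cfg = {
		.dest_type = DPDMAI_DEST_DPIO,  /* schedule mode, FQDAN notifications */
		.dest_id   = dpio_id,
		.priority  = 0,
	},
};

err = dpdmai_set_rx_queue(mc_io, 0, token, 0 /* priority */, &rx_cfg);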
index 06664fbd2d911638167f35ea06ea57bd0d2565f0..89792083d62c51749023dada82b34e48ab7c07c3 100644 (file)
@@ -1155,6 +1155,9 @@ static int fsl_qdma_probe(struct platform_device *pdev)
                return ret;
 
        fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
+       if (fsl_qdma->irq_base < 0)
+               return fsl_qdma->irq_base;
+
        fsl_qdma->feature = of_property_read_bool(np, "big-endian");
        INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);
 
index a3f942a6a946ab7c40196f6152891b62302a5e3a..db0e274126fb7c4b6e779108732523b76fdde12f 100644 (file)
@@ -173,7 +173,7 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
                                        &iop_chan->chain, chain_node) {
                                        zero_sum_result |=
                                            iop_desc_get_zero_result(grp_iter);
-                                           pr_debug("\titer%d result: %d\n",
+                                       pr_debug("\titer%d result: %d\n",
                                            grp_iter->idx, zero_sum_result);
                                        slot_cnt -= slots_per_op;
                                        if (slot_cnt == 0)
@@ -1359,9 +1359,11 @@ static int iop_adma_probe(struct platform_device *pdev)
        iop_adma_device_clear_err_status(iop_chan);
 
        for (i = 0; i < 3; i++) {
-               irq_handler_t handler[] = { iop_adma_eot_handler,
-                                       iop_adma_eoc_handler,
-                                       iop_adma_err_handler };
+               static const irq_handler_t handler[] = {
+                       iop_adma_eot_handler,
+                       iop_adma_eoc_handler,
+                       iop_adma_err_handler
+               };
                int irq = platform_get_irq(pdev, i);
                if (irq < 0) {
                        ret = -ENXIO;
index 4b36c8810517631f091ce9897d68a8bbf05e59b7..adecea51814f0a2cb6a26f8593bbf86e9c931e3d 100644 (file)
@@ -835,13 +835,8 @@ static int k3_dma_probe(struct platform_device *op)
        const struct k3dma_soc_data *soc_data;
        struct k3_dma_dev *d;
        const struct of_device_id *of_id;
-       struct resource *iores;
        int i, ret, irq = 0;
 
-       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
-
        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;
@@ -850,7 +845,7 @@ static int k3_dma_probe(struct platform_device *op)
        if (!soc_data)
                return -EINVAL;
 
-       d->base = devm_ioremap_resource(&op->dev, iores);
+       d->base = devm_platform_ioremap_resource(op, 0);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);
 
index 723b11c190b37e6f5ca93c6238feee3624a78414..6bf838e63be1a99bd3575cf3ed72f48062b9a02a 100644 (file)
@@ -819,15 +819,7 @@ static int mtk_cqdma_probe(struct platform_device *pdev)
                INIT_LIST_HEAD(&cqdma->pc[i]->queue);
                spin_lock_init(&cqdma->pc[i]->lock);
                refcount_set(&cqdma->pc[i]->refcnt, 0);
-
-               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               if (!res) {
-                       dev_err(&pdev->dev, "No mem resource for %s\n",
-                               dev_name(&pdev->dev));
-                       return -EINVAL;
-               }
-
-               cqdma->pc[i]->base = devm_ioremap_resource(&pdev->dev, res);
+               cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(cqdma->pc[i]->base))
                        return PTR_ERR(cqdma->pc[i]->base);
 
index 1a2028e1c29e966a593e2e1f85d6246be51a334e..4c58da7421432cd08a2664d6f48448901ec69282 100644 (file)
@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        if (err) {
                dev_err(&pdev->dev,
                        "request_irq failed with err %d\n", err);
-               goto err_unregister;
+               goto err_free;
        }
 
        platform_set_drvdata(pdev, hsdma);
@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
 
        return 0;
 
+err_free:
+       of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
        dma_async_device_unregister(dd);
 
index f40051d6aecbcd51bcdf04f560ac130624fea596..c20e6bd4e29898eefe0cba3026291f05a1e0f7e3 100644 (file)
@@ -475,7 +475,6 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct mtk_uart_apdmadev *mtkd;
        int bit_mask = 32, rc;
-       struct resource *res;
        struct mtk_chan *c;
        unsigned int i;
 
@@ -532,13 +531,7 @@ static int mtk_uart_apdma_probe(struct platform_device *pdev)
                        goto err_no_dma;
                }
 
-               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               if (!res) {
-                       rc = -ENODEV;
-                       goto err_no_dma;
-               }
-
-               c->base = devm_ioremap_resource(&pdev->dev, res);
+               c->base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(c->base)) {
                        rc = PTR_ERR(c->base);
                        goto err_no_dma;
diff --git a/drivers/dma/milbeaut-hdmac.c b/drivers/dma/milbeaut-hdmac.c
new file mode 100644 (file)
index 0000000..8853d44
--- /dev/null
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+#define MLB_HDMAC_DMACR                0x0     /* global */
+#define MLB_HDMAC_DE           BIT(31)
+#define MLB_HDMAC_DS           BIT(30)
+#define MLB_HDMAC_PR           BIT(28)
+#define MLB_HDMAC_DH           GENMASK(27, 24)
+
+#define MLB_HDMAC_CH_STRIDE    0x10
+
+#define MLB_HDMAC_DMACA                0x0     /* channel */
+#define MLB_HDMAC_EB           BIT(31)
+#define MLB_HDMAC_PB           BIT(30)
+#define MLB_HDMAC_ST           BIT(29)
+#define MLB_HDMAC_IS           GENMASK(28, 24)
+#define MLB_HDMAC_BT           GENMASK(23, 20)
+#define MLB_HDMAC_BC           GENMASK(19, 16)
+#define MLB_HDMAC_TC           GENMASK(15, 0)
+#define MLB_HDMAC_DMACB                0x4
+#define MLB_HDMAC_TT           GENMASK(31, 30)
+#define MLB_HDMAC_MS           GENMASK(29, 28)
+#define MLB_HDMAC_TW           GENMASK(27, 26)
+#define MLB_HDMAC_FS           BIT(25)
+#define MLB_HDMAC_FD           BIT(24)
+#define MLB_HDMAC_RC           BIT(23)
+#define MLB_HDMAC_RS           BIT(22)
+#define MLB_HDMAC_RD           BIT(21)
+#define MLB_HDMAC_EI           BIT(20)
+#define MLB_HDMAC_CI           BIT(19)
+#define HDMAC_PAUSE            0x7
+#define MLB_HDMAC_SS           GENMASK(18, 16)
+#define MLB_HDMAC_SP           GENMASK(15, 12)
+#define MLB_HDMAC_DP           GENMASK(11, 8)
+#define MLB_HDMAC_DMACSA       0x8
+#define MLB_HDMAC_DMACDA       0xc
+
+#define MLB_HDMAC_BUSWIDTHS            (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+struct milbeaut_hdmac_desc {
+       struct virt_dma_desc vd;
+       struct scatterlist *sgl;
+       unsigned int sg_len;
+       unsigned int sg_cur;
+       enum dma_transfer_direction dir;
+};
+
+struct milbeaut_hdmac_chan {
+       struct virt_dma_chan vc;
+       struct milbeaut_hdmac_device *mdev;
+       struct milbeaut_hdmac_desc *md;
+       void __iomem *reg_ch_base;
+       unsigned int slave_id;
+       struct dma_slave_config cfg;
+};
+
+struct milbeaut_hdmac_device {
+       struct dma_device ddev;
+       struct clk *clk;
+       void __iomem *reg_base;
+       struct milbeaut_hdmac_chan channels[0];
+};
+
+static struct milbeaut_hdmac_chan *
+to_milbeaut_hdmac_chan(struct virt_dma_chan *vc)
+{
+       return container_of(vc, struct milbeaut_hdmac_chan, vc);
+}
+
+static struct milbeaut_hdmac_desc *
+to_milbeaut_hdmac_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct milbeaut_hdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_hdmac_desc *
+milbeaut_hdmac_next_desc(struct milbeaut_hdmac_chan *mc)
+{
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&mc->vc);
+       if (!vd) {
+               mc->md = NULL;
+               return NULL;
+       }
+
+       list_del(&vd->node);
+
+       mc->md = to_milbeaut_hdmac_desc(vd);
+
+       return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_hdmac_chan *mc,
+                               struct milbeaut_hdmac_desc *md)
+{
+       struct scatterlist *sg;
+       u32 cb, ca, src_addr, dest_addr, len;
+       u32 width, burst;
+
+       sg = &md->sgl[md->sg_cur];
+       len = sg_dma_len(sg);
+
+       cb = MLB_HDMAC_CI | MLB_HDMAC_EI;
+       if (md->dir == DMA_MEM_TO_DEV) {
+               cb |= MLB_HDMAC_FD;
+               width = mc->cfg.dst_addr_width;
+               burst = mc->cfg.dst_maxburst;
+               src_addr = sg_dma_address(sg);
+               dest_addr = mc->cfg.dst_addr;
+       } else {
+               cb |= MLB_HDMAC_FS;
+               width = mc->cfg.src_addr_width;
+               burst = mc->cfg.src_maxburst;
+               src_addr = mc->cfg.src_addr;
+               dest_addr = sg_dma_address(sg);
+       }
+       cb |= FIELD_PREP(MLB_HDMAC_TW, (width >> 1));
+       cb |= FIELD_PREP(MLB_HDMAC_MS, 2);
+
+       writel_relaxed(MLB_HDMAC_DE, mc->mdev->reg_base + MLB_HDMAC_DMACR);
+       writel_relaxed(src_addr, mc->reg_ch_base + MLB_HDMAC_DMACSA);
+       writel_relaxed(dest_addr, mc->reg_ch_base + MLB_HDMAC_DMACDA);
+       writel_relaxed(cb, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+       ca = FIELD_PREP(MLB_HDMAC_IS, mc->slave_id);
+       if (burst == 16)
+               ca |= FIELD_PREP(MLB_HDMAC_BT, 0xf);
+       else if (burst == 8)
+               ca |= FIELD_PREP(MLB_HDMAC_BT, 0xd);
+       else if (burst == 4)
+               ca |= FIELD_PREP(MLB_HDMAC_BT, 0xb);
+       burst *= width;
+       ca |= FIELD_PREP(MLB_HDMAC_TC, (len / burst - 1));
+       writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+       ca |= MLB_HDMAC_EB;
+       writel_relaxed(ca, mc->reg_ch_base + MLB_HDMAC_DMACA);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_hdmac_start(struct milbeaut_hdmac_chan *mc)
+{
+       struct milbeaut_hdmac_desc *md;
+
+       md = milbeaut_hdmac_next_desc(mc);
+       if (md)
+               milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_hdmac_interrupt(int irq, void *dev_id)
+{
+       struct milbeaut_hdmac_chan *mc = dev_id;
+       struct milbeaut_hdmac_desc *md;
+       u32 val;
+
+       spin_lock(&mc->vc.lock);
+
+       /* Ack and Disable irqs */
+       val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACB);
+       val &= ~(FIELD_PREP(MLB_HDMAC_SS, HDMAC_PAUSE));
+       writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+       val &= ~MLB_HDMAC_EI;
+       val &= ~MLB_HDMAC_CI;
+       writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACB);
+
+       md = mc->md;
+       if (!md)
+               goto out;
+
+       md->sg_cur++;
+
+       if (md->sg_cur >= md->sg_len) {
+               vchan_cookie_complete(&md->vd);
+               md = milbeaut_hdmac_next_desc(mc);
+               if (!md)
+                       goto out;
+       }
+
+       milbeaut_chan_start(mc, md);
+
+out:
+       spin_unlock(&mc->vc.lock);
+       return IRQ_HANDLED;
+}
+
+static void milbeaut_hdmac_free_chan_resources(struct dma_chan *chan)
+{
+       vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static int
+milbeaut_hdmac_chan_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+
+       spin_lock(&mc->vc.lock);
+       mc->cfg = *cfg;
+       spin_unlock(&mc->vc.lock);
+
+       return 0;
+}
+
+static int milbeaut_hdmac_chan_pause(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+       u32 val;
+
+       spin_lock(&mc->vc.lock);
+       val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+       val |= MLB_HDMAC_PB;
+       writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+       spin_unlock(&mc->vc.lock);
+
+       return 0;
+}
+
+static int milbeaut_hdmac_chan_resume(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+       u32 val;
+
+       spin_lock(&mc->vc.lock);
+       val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+       val &= ~MLB_HDMAC_PB;
+       writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+       spin_unlock(&mc->vc.lock);
+
+       return 0;
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_hdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+                            unsigned int sg_len,
+                            enum dma_transfer_direction direction,
+                            unsigned long flags, void *context)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_desc *md;
+       int i;
+
+       if (!is_slave_direction(direction))
+               return NULL;
+
+       md = kzalloc(sizeof(*md), GFP_NOWAIT);
+       if (!md)
+               return NULL;
+
+       md->sgl = kzalloc(sizeof(*sgl) * sg_len, GFP_NOWAIT);
+       if (!md->sgl) {
+               kfree(md);
+               return NULL;
+       }
+
+       for (i = 0; i < sg_len; i++)
+               md->sgl[i] = sgl[i];
+
+       md->sg_len = sg_len;
+       md->dir = direction;
+
+       return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_hdmac_terminate_all(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+       unsigned long flags;
+       u32 val;
+
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&vc->lock, flags);
+
+       val = readl_relaxed(mc->reg_ch_base + MLB_HDMAC_DMACA);
+       val &= ~MLB_HDMAC_EB; /* disable the channel */
+       writel_relaxed(val, mc->reg_ch_base + MLB_HDMAC_DMACA);
+
+       if (mc->md) {
+               vchan_terminate_vdesc(&mc->md->vd);
+               mc->md = NULL;
+       }
+
+       vchan_get_all_descriptors(vc, &head);
+
+       spin_unlock_irqrestore(&vc->lock, flags);
+
+       vchan_dma_desc_free_list(vc, &head);
+
+       return 0;
+}
+
+static void milbeaut_hdmac_synchronize(struct dma_chan *chan)
+{
+       vchan_synchronize(to_virt_chan(chan));
+}
+
+static enum dma_status milbeaut_hdmac_tx_status(struct dma_chan *chan,
+                                               dma_cookie_t cookie,
+                                               struct dma_tx_state *txstate)
+{
+       struct virt_dma_chan *vc;
+       struct virt_dma_desc *vd;
+       struct milbeaut_hdmac_chan *mc;
+       struct milbeaut_hdmac_desc *md = NULL;
+       enum dma_status stat;
+       unsigned long flags;
+       int i;
+
+       stat = dma_cookie_status(chan, cookie, txstate);
+       /* Return immediately if we do not need to compute the residue. */
+       if (stat == DMA_COMPLETE || !txstate)
+               return stat;
+
+       vc = to_virt_chan(chan);
+
+       spin_lock_irqsave(&vc->lock, flags);
+
+       mc = to_milbeaut_hdmac_chan(vc);
+
+       /* residue from the in-flight chunk */
+       if (mc->md && mc->md->vd.tx.cookie == cookie) {
+               struct scatterlist *sg;
+               u32 done;
+
+               md = mc->md;
+               sg = &md->sgl[md->sg_cur];
+
+               if (md->dir == DMA_DEV_TO_MEM)
+                       done = readl_relaxed(mc->reg_ch_base
+                                            + MLB_HDMAC_DMACDA);
+               else
+                       done = readl_relaxed(mc->reg_ch_base
+                                            + MLB_HDMAC_DMACSA);
+               done -= sg_dma_address(sg);
+
+               txstate->residue = -done;
+       }
+
+       if (!md) {
+               vd = vchan_find_desc(vc, cookie);
+               if (vd)
+                       md = to_milbeaut_hdmac_desc(vd);
+       }
+
+       if (md) {
+               /* residue from the queued chunks */
+               for (i = md->sg_cur; i < md->sg_len; i++)
+                       txstate->residue += sg_dma_len(&md->sgl[i]);
+       }
+
+       spin_unlock_irqrestore(&vc->lock, flags);
+
+       return stat;
+}
+
+static void milbeaut_hdmac_issue_pending(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_hdmac_chan *mc = to_milbeaut_hdmac_chan(vc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vc->lock, flags);
+
+       if (vchan_issue_pending(vc) && !mc->md)
+               milbeaut_hdmac_start(mc);
+
+       spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_hdmac_desc_free(struct virt_dma_desc *vd)
+{
+       struct milbeaut_hdmac_desc *md = to_milbeaut_hdmac_desc(vd);
+
+       kfree(md->sgl);
+       kfree(md);
+}
+
+static struct dma_chan *
+milbeaut_hdmac_xlate(struct of_phandle_args *dma_spec, struct of_dma *of_dma)
+{
+       struct milbeaut_hdmac_device *mdev = of_dma->of_dma_data;
+       struct milbeaut_hdmac_chan *mc;
+       struct virt_dma_chan *vc;
+       struct dma_chan *chan;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       chan = dma_get_any_slave_channel(&mdev->ddev);
+       if (!chan)
+               return NULL;
+
+       vc = to_virt_chan(chan);
+       mc = to_milbeaut_hdmac_chan(vc);
+       mc->slave_id = dma_spec->args[0];
+
+       return chan;
+}
+
+static int milbeaut_hdmac_chan_init(struct platform_device *pdev,
+                                   struct milbeaut_hdmac_device *mdev,
+                                   int chan_id)
+{
+       struct device *dev = &pdev->dev;
+       struct milbeaut_hdmac_chan *mc = &mdev->channels[chan_id];
+       char *irq_name;
+       int irq, ret;
+
+       irq = platform_get_irq(pdev, chan_id);
+       if (irq < 0)
+               return irq;
+
+       irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-hdmac-%d",
+                                 chan_id);
+       if (!irq_name)
+               return -ENOMEM;
+
+       ret = devm_request_irq(dev, irq, milbeaut_hdmac_interrupt,
+                              IRQF_SHARED, irq_name, mc);
+       if (ret)
+               return ret;
+
+       mc->mdev = mdev;
+       mc->reg_ch_base = mdev->reg_base + MLB_HDMAC_CH_STRIDE * (chan_id + 1);
+       mc->vc.desc_free = milbeaut_hdmac_desc_free;
+       vchan_init(&mc->vc, &mdev->ddev);
+
+       return 0;
+}
+
+static int milbeaut_hdmac_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct milbeaut_hdmac_device *mdev;
+       struct dma_device *ddev;
+       int nr_chans, ret, i;
+
+       nr_chans = platform_irq_count(pdev);
+       if (nr_chans < 0)
+               return nr_chans;
+
+       ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
+
+       mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+                           GFP_KERNEL);
+       if (!mdev)
+               return -ENOMEM;
+
+       mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mdev->reg_base))
+               return PTR_ERR(mdev->reg_base);
+
+       mdev->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(mdev->clk)) {
+               dev_err(dev, "failed to get clock\n");
+               return PTR_ERR(mdev->clk);
+       }
+
+       ret = clk_prepare_enable(mdev->clk);
+       if (ret)
+               return ret;
+
+       ddev = &mdev->ddev;
+       ddev->dev = dev;
+       dma_cap_set(DMA_SLAVE, ddev->cap_mask);
+       dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+       ddev->src_addr_widths = MLB_HDMAC_BUSWIDTHS;
+       ddev->dst_addr_widths = MLB_HDMAC_BUSWIDTHS;
+       ddev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
+       ddev->device_free_chan_resources = milbeaut_hdmac_free_chan_resources;
+       ddev->device_config = milbeaut_hdmac_chan_config;
+       ddev->device_pause = milbeaut_hdmac_chan_pause;
+       ddev->device_resume = milbeaut_hdmac_chan_resume;
+       ddev->device_prep_slave_sg = milbeaut_hdmac_prep_slave_sg;
+       ddev->device_terminate_all = milbeaut_hdmac_terminate_all;
+       ddev->device_synchronize = milbeaut_hdmac_synchronize;
+       ddev->device_tx_status = milbeaut_hdmac_tx_status;
+       ddev->device_issue_pending = milbeaut_hdmac_issue_pending;
+       INIT_LIST_HEAD(&ddev->channels);
+
+       for (i = 0; i < nr_chans; i++) {
+               ret = milbeaut_hdmac_chan_init(pdev, mdev, i);
+               if (ret)
+                       goto disable_clk;
+       }
+
+       ret = dma_async_device_register(ddev);
+       if (ret)
+               goto disable_clk;
+
+       ret = of_dma_controller_register(dev->of_node,
+                                        milbeaut_hdmac_xlate, mdev);
+       if (ret)
+               goto unregister_dmac;
+
+       platform_set_drvdata(pdev, mdev);
+
+       return 0;
+
+unregister_dmac:
+       dma_async_device_unregister(ddev);
+disable_clk:
+       clk_disable_unprepare(mdev->clk);
+
+       return ret;
+}
+
+static int milbeaut_hdmac_remove(struct platform_device *pdev)
+{
+       struct milbeaut_hdmac_device *mdev = platform_get_drvdata(pdev);
+       struct dma_chan *chan;
+       int ret;
+
+       /*
+        * Before reaching here, almost all descriptors have been freed by the
+        * ->device_free_chan_resources() hook. However, each channel might
+        * still be holding one descriptor that was in flight at that moment.
+        * Terminate it to make sure this hardware is no longer running. Then,
+        * free the channel resources once again to avoid memory leak.
+        */
+       list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+               ret = dmaengine_terminate_sync(chan);
+               if (ret)
+                       return ret;
+               milbeaut_hdmac_free_chan_resources(chan);
+       }
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&mdev->ddev);
+       clk_disable_unprepare(mdev->clk);
+
+       return 0;
+}
+
+static const struct of_device_id milbeaut_hdmac_match[] = {
+       { .compatible = "socionext,milbeaut-m10v-hdmac" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_hdmac_match);
+
+static struct platform_driver milbeaut_hdmac_driver = {
+       .probe = milbeaut_hdmac_probe,
+       .remove = milbeaut_hdmac_remove,
+       .driver = {
+               .name = "milbeaut-m10v-hdmac",
+               .of_match_table = milbeaut_hdmac_match,
+       },
+};
+module_platform_driver(milbeaut_hdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut HDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
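The driver exposes only the standard dmaengine slave interface, so consumers drive it through the generic client API rather than any driver-specific calls. A minimal, hypothetical device-to-memory sketch (the channel name, FIFO address, scatterlist and completion callback are placeholders) might look like this:

/* Hypothetical dmaengine client sketch -- not part of this patch. */
struct dma_slave_config cfg = {
	.direction      = DMA_DEV_TO_MEM,
	.src_addr       = fifo_phys_addr,            /* placeholder device FIFO address */
	.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst   = 8,
};
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;

chan = dma_request_chan(dev, "rx");                  /* matched via "dmas"/"dma-names" in DT */
if (IS_ERR(chan))
	return PTR_ERR(chan);

dmaengine_slave_config(chan, &cfg);

desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
			       DMA_PREP_INTERRUPT);
if (!desc)
	return -ENOMEM;

desc->callback = rx_done;                            /* placeholder completion callback */
dmaengine_submit(desc);
dma_async_issue_pending(chan);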
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
new file mode 100644 (file)
index 0000000..ab3d2f3
--- /dev/null
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (C) 2019 Linaro Ltd.
+// Copyright (C) 2019 Socionext Inc.
+
+#include <linux/bits.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+#include "virt-dma.h"
+
+/* global register */
+#define M10V_XDACS 0x00
+
+/* channel local register */
+#define M10V_XDTBC 0x10
+#define M10V_XDSSA 0x14
+#define M10V_XDDSA 0x18
+#define M10V_XDSAC 0x1C
+#define M10V_XDDAC 0x20
+#define M10V_XDDCC 0x24
+#define M10V_XDDES 0x28
+#define M10V_XDDPC 0x2C
+#define M10V_XDDSD 0x30
+
+#define M10V_XDACS_XE BIT(28)
+
+#define M10V_DEFBS     0x3
+#define M10V_DEFBL     0xf
+
+#define M10V_XDSAC_SBS GENMASK(17, 16)
+#define M10V_XDSAC_SBL GENMASK(11, 8)
+
+#define M10V_XDDAC_DBS GENMASK(17, 16)
+#define M10V_XDDAC_DBL GENMASK(11, 8)
+
+#define M10V_XDDES_CE  BIT(28)
+#define M10V_XDDES_SE  BIT(24)
+#define M10V_XDDES_SA  BIT(15)
+#define M10V_XDDES_TF  GENMASK(23, 20)
+#define M10V_XDDES_EI  BIT(1)
+#define M10V_XDDES_TI  BIT(0)
+
+#define M10V_XDDSD_IS_MASK     GENMASK(3, 0)
+#define M10V_XDDSD_IS_NORMAL   0x8
+
+#define MLB_XDMAC_BUSWIDTHS    (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct milbeaut_xdmac_desc {
+       struct virt_dma_desc vd;
+       size_t len;
+       dma_addr_t src;
+       dma_addr_t dst;
+};
+
+struct milbeaut_xdmac_chan {
+       struct virt_dma_chan vc;
+       struct milbeaut_xdmac_desc *md;
+       void __iomem *reg_ch_base;
+};
+
+struct milbeaut_xdmac_device {
+       struct dma_device ddev;
+       void __iomem *reg_base;
+       struct milbeaut_xdmac_chan channels[0];
+};
+
+static struct milbeaut_xdmac_chan *
+to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
+{
+       return container_of(vc, struct milbeaut_xdmac_chan, vc);
+}
+
+static struct milbeaut_xdmac_desc *
+to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct milbeaut_xdmac_desc, vd);
+}
+
+/* mc->vc.lock must be held by caller */
+static struct milbeaut_xdmac_desc *
+milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc)
+{
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&mc->vc);
+       if (!vd) {
+               mc->md = NULL;
+               return NULL;
+       }
+
+       list_del(&vd->node);
+
+       mc->md = to_milbeaut_xdmac_desc(vd);
+
+       return mc->md;
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
+                               struct milbeaut_xdmac_desc *md)
+{
+       u32 val;
+
+       /* Setup the channel */
+       val = md->len - 1;
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);
+
+       val = md->src;
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);
+
+       val = md->dst;
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);
+
+       val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
+       val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
+       val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
+               FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);
+
+       val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
+       val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
+       val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
+               FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);
+
+       /* Start the channel */
+       val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
+       val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
+                M10V_XDDES_EI | M10V_XDDES_TI);
+       val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
+               FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
+               FIELD_PREP(M10V_XDDES_TI, 1);
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
+}
+
+/* mc->vc.lock must be held by caller */
+static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
+{
+       struct milbeaut_xdmac_desc *md;
+
+       md = milbeaut_xdmac_next_desc(mc);
+       if (md)
+               milbeaut_chan_start(mc, md);
+}
+
+static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
+{
+       struct milbeaut_xdmac_chan *mc = dev_id;
+       struct milbeaut_xdmac_desc *md;
+       unsigned long flags;
+       u32 val;
+
+       spin_lock_irqsave(&mc->vc.lock, flags);
+
+       /* Ack and Stop */
+       val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
+       writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);
+
+       md = mc->md;
+       if (!md)
+               goto out;
+
+       vchan_cookie_complete(&md->vd);
+
+       milbeaut_xdmac_start(mc);
+out:
+       spin_unlock_irqrestore(&mc->vc.lock, flags);
+       return IRQ_HANDLED;
+}
+
+static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
+{
+       vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+static struct dma_async_tx_descriptor *
+milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
+                          dma_addr_t src, size_t len, unsigned long flags)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_xdmac_desc *md;
+
+       md = kzalloc(sizeof(*md), GFP_NOWAIT);
+       if (!md)
+               return NULL;
+
+       md->len = len;
+       md->src = src;
+       md->dst = dst;
+
+       return vchan_tx_prep(vc, &md->vd, flags);
+}
+
+static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+       unsigned long flags;
+       u32 val;
+
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&vc->lock, flags);
+
+       /* Halt the channel */
+       val = readl(mc->reg_ch_base + M10V_XDDES);
+       val &= ~M10V_XDDES_CE;
+       val |= FIELD_PREP(M10V_XDDES_CE, 0);
+       writel(val, mc->reg_ch_base + M10V_XDDES);
+
+       if (mc->md) {
+               vchan_terminate_vdesc(&mc->md->vd);
+               mc->md = NULL;
+       }
+
+       vchan_get_all_descriptors(vc, &head);
+
+       spin_unlock_irqrestore(&vc->lock, flags);
+
+       vchan_dma_desc_free_list(vc, &head);
+
+       return 0;
+}
+
+static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
+{
+       vchan_synchronize(to_virt_chan(chan));
+}
+
+static void milbeaut_xdmac_issue_pending(struct dma_chan *chan)
+{
+       struct virt_dma_chan *vc = to_virt_chan(chan);
+       struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vc->lock, flags);
+
+       if (vchan_issue_pending(vc) && !mc->md)
+               milbeaut_xdmac_start(mc);
+
+       spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(to_milbeaut_xdmac_desc(vd));
+}
+
+static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
+                                   struct milbeaut_xdmac_device *mdev,
+                                   int chan_id)
+{
+       struct device *dev = &pdev->dev;
+       struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
+       char *irq_name;
+       int irq, ret;
+
+       irq = platform_get_irq(pdev, chan_id);
+       if (irq < 0)
+               return irq;
+
+       irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
+                                 chan_id);
+       if (!irq_name)
+               return -ENOMEM;
+
+       ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
+                              IRQF_SHARED, irq_name, mc);
+       if (ret)
+               return ret;
+
+       mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;
+
+       mc->vc.desc_free = milbeaut_xdmac_desc_free;
+       vchan_init(&mc->vc, &mdev->ddev);
+
+       return 0;
+}
+
+static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+       unsigned int val;
+
+       val = readl(mdev->reg_base + M10V_XDACS);
+       val |= M10V_XDACS_XE;
+       writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
+{
+       unsigned int val;
+
+       val = readl(mdev->reg_base + M10V_XDACS);
+       val &= ~M10V_XDACS_XE;
+       writel(val, mdev->reg_base + M10V_XDACS);
+}
+
+static int milbeaut_xdmac_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct milbeaut_xdmac_device *mdev;
+       struct dma_device *ddev;
+       int nr_chans, ret, i;
+
+       nr_chans = platform_irq_count(pdev);
+       if (nr_chans < 0)
+               return nr_chans;
+
+       mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans),
+                           GFP_KERNEL);
+       if (!mdev)
+               return -ENOMEM;
+
+       mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(mdev->reg_base))
+               return PTR_ERR(mdev->reg_base);
+
+       ddev = &mdev->ddev;
+       ddev->dev = dev;
+       dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+       ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS;
+       ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS;
+       ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources;
+       ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy;
+       ddev->device_terminate_all = milbeaut_xdmac_terminate_all;
+       ddev->device_synchronize = milbeaut_xdmac_synchronize;
+       ddev->device_tx_status = dma_cookie_status;
+       ddev->device_issue_pending = milbeaut_xdmac_issue_pending;
+       INIT_LIST_HEAD(&ddev->channels);
+
+       for (i = 0; i < nr_chans; i++) {
+               ret = milbeaut_xdmac_chan_init(pdev, mdev, i);
+               if (ret)
+                       return ret;
+       }
+
+       enable_xdmac(mdev);
+
+       ret = dma_async_device_register(ddev);
+       if (ret)
+               return ret;
+
+       ret = of_dma_controller_register(dev->of_node,
+                                        of_dma_simple_xlate, mdev);
+       if (ret)
+               goto unregister_dmac;
+
+       platform_set_drvdata(pdev, mdev);
+
+       return 0;
+
+unregister_dmac:
+       dma_async_device_unregister(ddev);
+       return ret;
+}
+
+static int milbeaut_xdmac_remove(struct platform_device *pdev)
+{
+       struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
+       struct dma_chan *chan;
+       int ret;
+
+       /*
+        * Before reaching here, almost all descriptors have been freed by the
+        * ->device_free_chan_resources() hook. However, each channel might
+        * still be holding one descriptor that was in flight at that moment.
+        * Terminate it to make sure this hardware is no longer running. Then,
+        * free the channel resources once again to avoid memory leak.
+        */
+       list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
+               ret = dmaengine_terminate_sync(chan);
+               if (ret)
+                       return ret;
+               milbeaut_xdmac_free_chan_resources(chan);
+       }
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&mdev->ddev);
+
+       disable_xdmac(mdev);
+
+       return 0;
+}
+
+static const struct of_device_id milbeaut_xdmac_match[] = {
+       { .compatible = "socionext,milbeaut-m10v-xdmac" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
+
+static struct platform_driver milbeaut_xdmac_driver = {
+       .probe = milbeaut_xdmac_probe,
+       .remove = milbeaut_xdmac_remove,
+       .driver = {
+               .name = "milbeaut-m10v-xdmac",
+               .of_match_table = milbeaut_xdmac_match,
+       },
+};
+module_platform_driver(milbeaut_xdmac_driver);
+
+MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
+MODULE_LICENSE("GPL v2");
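This controller advertises only DMA_MEMCPY, so a client would request any memcpy-capable channel and submit plain memory-to-memory descriptors through the generic dmaengine API. A hypothetical sketch (dst_dma, src_dma and len are placeholders) might look like:

/* Hypothetical dmaengine memcpy sketch -- not part of this patch. */
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan;
dma_cap_mask_t mask;
dma_cookie_t cookie;

dma_cap_zero(mask);
dma_cap_set(DMA_MEMCPY, mask);
chan = dma_request_chan_by_mask(&mask);
if (IS_ERR(chan))
	return PTR_ERR(chan);

desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				 DMA_PREP_INTERRUPT);
if (!desc)
	return -ENOMEM;

cookie = dmaengine_submit(desc);
dma_async_issue_pending(chan);
dma_sync_wait(chan, cookie);                         /* poll until the copy completes */
dma_release_channel(chan);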
index 90bbcef99ef849f2af4311721728e2182d6044d8..023f951189a727af61771991c85be0ac650449e3 100644 (file)
@@ -1045,18 +1045,13 @@ static int owl_dma_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct owl_dma *od;
-       struct resource *res;
        int ret, i, nr_channels, nr_requests;
 
        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -EINVAL;
-
-       od->base = devm_ioremap_resource(&pdev->dev, res);
+       od->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(od->base))
                return PTR_ERR(od->base);
 
diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
new file mode 100644 (file)
index 0000000..f8ffa02
--- /dev/null
@@ -0,0 +1,6 @@
+config SF_PDMA
+       tristate "Sifive PDMA controller driver"
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support the SiFive PDMA controller.
diff --git a/drivers/dma/sf-pdma/Makefile b/drivers/dma/sf-pdma/Makefile
new file mode 100644 (file)
index 0000000..764552a
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_SF_PDMA)   += sf-pdma.o
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
new file mode 100644 (file)
index 0000000..16fe005
--- /dev/null
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/**
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ *   SiFive FU540-C000 v1.0
+ *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#include "sf-pdma.h"
+
+#ifndef readq
+static inline unsigned long long readq(void __iomem *addr)
+{
+       return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(unsigned long long v, void __iomem *addr)
+{
+       writel(lower_32_bits(v), addr);
+       writel(upper_32_bits(v), addr + 4);
+}
+#endif
+
+static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
+{
+       return container_of(dchan, struct sf_pdma_chan, vchan.chan);
+}
+
+static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct sf_pdma_desc, vdesc);
+}
+
+static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
+{
+       struct sf_pdma_desc *desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+
+       if (chan->desc && !chan->desc->in_use) {
+               spin_unlock_irqrestore(&chan->lock, flags);
+               return chan->desc;
+       }
+
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
+       if (!desc)
+               return NULL;
+
+       desc->chan = chan;
+
+       return desc;
+}
+
+static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
+                             u64 dst, u64 src, u64 size)
+{
+       desc->xfer_type = PDMA_FULL_SPEED;
+       desc->xfer_size = size;
+       desc->dst_addr = dst;
+       desc->src_addr = src;
+}
+
+static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
+{
+       struct pdma_regs *regs = &chan->regs;
+
+       writel(PDMA_CLEAR_CTRL, regs->ctrl);
+}
+
+static struct dma_async_tx_descriptor *
+sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
+                       size_t len, unsigned long flags)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       struct sf_pdma_desc *desc;
+
+       if (chan && (!len || !dest || !src)) {
+               dev_err(chan->pdma->dma_dev.dev,
+                       "Please check dma len, dest, src!\n");
+               return NULL;
+       }
+
+       desc = sf_pdma_alloc_desc(chan);
+       if (!desc)
+               return NULL;
+
+       desc->in_use = true;
+       desc->dirn = DMA_MEM_TO_MEM;
+       desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       chan->desc = desc;
+       sf_pdma_fill_desc(desc, dest, src, len);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       return desc->async_tx;
+}
+
+static int sf_pdma_slave_config(struct dma_chan *dchan,
+                               struct dma_slave_config *cfg)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+
+       memcpy(&chan->cfg, cfg, sizeof(*cfg));
+
+       return 0;
+}
+
+static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       struct pdma_regs *regs = &chan->regs;
+
+       dma_cookie_init(dchan);
+       writel(PDMA_CLAIM_MASK, regs->ctrl);
+
+       return 0;
+}
+
+static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
+{
+       struct pdma_regs *regs = &chan->regs;
+
+       writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
+}
+
+static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       sf_pdma_disable_request(chan);
+       kfree(chan->desc);
+       chan->desc = NULL;
+       vchan_get_all_descriptors(&chan->vchan, &head);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
+       sf_pdma_disclaim_chan(chan);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
+                                  dma_cookie_t cookie)
+{
+       struct virt_dma_desc *vd = NULL;
+       struct pdma_regs *regs = &chan->regs;
+       unsigned long flags;
+       u64 residue = 0;
+       struct sf_pdma_desc *desc;
+       struct dma_async_tx_descriptor *tx;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       tx = &chan->desc->vdesc.tx;
+       if (cookie == tx->chan->completed_cookie)
+               goto out;
+
+       if (cookie == tx->cookie) {
+               residue = readq(regs->residue);
+       } else {
+               vd = vchan_find_desc(&chan->vchan, cookie);
+               if (!vd)
+                       goto out;
+
+               desc = to_sf_pdma_desc(vd);
+               residue = desc->xfer_size;
+       }
+
+out:
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+       return residue;
+}
+
+static enum dma_status
+sf_pdma_tx_status(struct dma_chan *dchan,
+                 dma_cookie_t cookie,
+                 struct dma_tx_state *txstate)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       enum dma_status status;
+
+       status = dma_cookie_status(dchan, cookie, txstate);
+
+       if (txstate && status != DMA_ERROR)
+               dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));
+
+       return status;
+}
+
+static int sf_pdma_terminate_all(struct dma_chan *dchan)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       sf_pdma_disable_request(chan);
+       kfree(chan->desc);
+       chan->desc = NULL;
+       chan->xfer_err = false;
+       vchan_get_all_descriptors(&chan->vchan, &head);
+       vchan_dma_desc_free_list(&chan->vchan, &head);
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       return 0;
+}
+
+static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
+{
+       struct pdma_regs *regs = &chan->regs;
+       u32 v;
+
+       v = PDMA_CLAIM_MASK |
+               PDMA_ENABLE_DONE_INT_MASK |
+               PDMA_ENABLE_ERR_INT_MASK |
+               PDMA_RUN_MASK;
+
+       writel(v, regs->ctrl);
+}
+
+static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
+{
+       struct sf_pdma_desc *desc = chan->desc;
+       struct pdma_regs *regs = &chan->regs;
+
+       if (!desc) {
+               dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
+               return;
+       }
+
+       writel(desc->xfer_type, regs->xfer_type);
+       writeq(desc->xfer_size, regs->xfer_size);
+       writeq(desc->dst_addr, regs->dst_addr);
+       writeq(desc->src_addr, regs->src_addr);
+
+       chan->desc = desc;
+       chan->status = DMA_IN_PROGRESS;
+       sf_pdma_enable_request(chan);
+}
+
+static void sf_pdma_issue_pending(struct dma_chan *dchan)
+{
+       struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+
+       if (vchan_issue_pending(&chan->vchan) && chan->desc)
+               sf_pdma_xfer_desc(chan);
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+}
+
+static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
+{
+       struct sf_pdma_desc *desc;
+
+       desc = to_sf_pdma_desc(vdesc);
+       desc->in_use = false;
+}
+
+static void sf_pdma_donebh_tasklet(unsigned long arg)
+{
+       struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+       struct sf_pdma_desc *desc = chan->desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->xfer_err) {
+               chan->retries = MAX_RETRY;
+               chan->status = DMA_COMPLETE;
+               chan->xfer_err = false;
+       }
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+}
+
+static void sf_pdma_errbh_tasklet(unsigned long arg)
+{
+       struct sf_pdma_chan *chan = (struct sf_pdma_chan *)arg;
+       struct sf_pdma_desc *desc = chan->desc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (chan->retries <= 0) {
+               /* fail to recover */
+               spin_unlock_irqrestore(&chan->lock, flags);
+               dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
+       } else {
+               /* retry */
+               chan->retries--;
+               chan->xfer_err = true;
+               chan->status = DMA_ERROR;
+
+               sf_pdma_enable_request(chan);
+               spin_unlock_irqrestore(&chan->lock, flags);
+       }
+}
+
+static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
+{
+       struct sf_pdma_chan *chan = dev_id;
+       struct pdma_regs *regs = &chan->regs;
+       unsigned long flags;
+       u64 residue;
+
+       spin_lock_irqsave(&chan->vchan.lock, flags);
+       writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
+       residue = readq(regs->residue);
+
+       if (!residue) {
+               list_del(&chan->desc->vdesc.node);
+               vchan_cookie_complete(&chan->desc->vdesc);
+       } else {
+               /* submit the next transaction if possible */
+               struct sf_pdma_desc *desc = chan->desc;
+
+               desc->src_addr += desc->xfer_size - residue;
+               desc->dst_addr += desc->xfer_size - residue;
+               desc->xfer_size = residue;
+
+               sf_pdma_xfer_desc(chan);
+       }
+
+       spin_unlock_irqrestore(&chan->vchan.lock, flags);
+
+       tasklet_hi_schedule(&chan->done_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
+{
+       struct sf_pdma_chan *chan = dev_id;
+       struct pdma_regs *regs = &chan->regs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       tasklet_schedule(&chan->err_tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * sf_pdma_irq_init() - Init PDMA IRQ Handlers
+ * @pdev: pointer to the platform device
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize the DONE and ERROR interrupt handlers for all 4 channels. The
+ * caller must make sure the pointers passed in are non-NULL. This function
+ * should be called only once, during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 0         - all IRQ handlers initialized successfully
+ * * -EINVAL   - failed to request an IRQ
+ */
+static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
+{
+       int irq, r, i;
+       struct sf_pdma_chan *chan;
+
+       for (i = 0; i < pdma->n_chans; i++) {
+               chan = &pdma->chans[i];
+
+               irq = platform_get_irq(pdev, i * 2);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
+                       return -EINVAL;
+               }
+
+               r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
+                                    dev_name(&pdev->dev), (void *)chan);
+               if (r) {
+                       dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
+                       return -EINVAL;
+               }
+
+               chan->txirq = irq;
+
+               irq = platform_get_irq(pdev, (i * 2) + 1);
+               if (irq < 0) {
+                       dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
+                       return -EINVAL;
+               }
+
+               r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
+                                    dev_name(&pdev->dev), (void *)chan);
+               if (r) {
+                       dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
+                       return -EINVAL;
+               }
+
+               chan->errirq = irq;
+       }
+
+       return 0;
+}
+
+/**
+ * sf_pdma_setup_chans() - Init settings of each channel
+ * @pdma: pointer to the PDMA engine; must not be NULL
+ *
+ * Initialize all per-channel data structures and register bases. The caller
+ * must make sure the pointer passed in is non-NULL. This function should be
+ * called only once, during device probe.
+ *
+ * Context: Any context.
+ *
+ * Return: none
+ */
+#define SF_PDMA_REG_BASE(ch)   (pdma->membase + (PDMA_CHAN_OFFSET * (ch)))
+static void sf_pdma_setup_chans(struct sf_pdma *pdma)
+{
+       int i;
+       struct sf_pdma_chan *chan;
+
+       INIT_LIST_HEAD(&pdma->dma_dev.channels);
+
+       for (i = 0; i < pdma->n_chans; i++) {
+               chan = &pdma->chans[i];
+
+               chan->regs.ctrl =
+                       SF_PDMA_REG_BASE(i) + PDMA_CTRL;
+               chan->regs.xfer_type =
+                       SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
+               chan->regs.xfer_size =
+                       SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
+               chan->regs.dst_addr =
+                       SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
+               chan->regs.src_addr =
+                       SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
+               chan->regs.act_type =
+                       SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
+               chan->regs.residue =
+                       SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
+               chan->regs.cur_dst_addr =
+                       SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
+               chan->regs.cur_src_addr =
+                       SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;
+
+               chan->pdma = pdma;
+               chan->pm_state = RUNNING;
+               chan->slave_id = i;
+               chan->xfer_err = false;
+               spin_lock_init(&chan->lock);
+
+               chan->vchan.desc_free = sf_pdma_free_desc;
+               vchan_init(&chan->vchan, &pdma->dma_dev);
+
+               writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
+
+               tasklet_init(&chan->done_tasklet,
+                            sf_pdma_donebh_tasklet, (unsigned long)chan);
+               tasklet_init(&chan->err_tasklet,
+                            sf_pdma_errbh_tasklet, (unsigned long)chan);
+       }
+}
+
+static int sf_pdma_probe(struct platform_device *pdev)
+{
+       struct sf_pdma *pdma;
+       struct sf_pdma_chan *chan;
+       struct resource *res;
+       int len, chans;
+       int ret;
+       const enum dma_slave_buswidth widths =
+               DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
+               DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
+               DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
+               DMA_SLAVE_BUSWIDTH_64_BYTES;
+
+       chans = PDMA_NR_CH;
+       len = sizeof(*pdma) + sizeof(*chan) * chans;
+       pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+       if (!pdma)
+               return -ENOMEM;
+
+       pdma->n_chans = chans;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(pdma->membase))
+               goto ERR_MEMBASE;
+
+       ret = sf_pdma_irq_init(pdev, pdma);
+       if (ret)
+               goto ERR_INITIRQ;
+
+       sf_pdma_setup_chans(pdma);
+
+       pdma->dma_dev.dev = &pdev->dev;
+
+       /* Setup capability */
+       dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
+       pdma->dma_dev.copy_align = 2;
+       pdma->dma_dev.src_addr_widths = widths;
+       pdma->dma_dev.dst_addr_widths = widths;
+       pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
+       pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+       pdma->dma_dev.descriptor_reuse = true;
+
+       /* Setup DMA APIs */
+       pdma->dma_dev.device_alloc_chan_resources =
+               sf_pdma_alloc_chan_resources;
+       pdma->dma_dev.device_free_chan_resources =
+               sf_pdma_free_chan_resources;
+       pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
+       pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
+       pdma->dma_dev.device_config = sf_pdma_slave_config;
+       pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
+       pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;
+
+       platform_set_drvdata(pdev, pdma);
+
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (ret)
+               dev_warn(&pdev->dev,
+                        "Failed to set DMA mask. Fall back to default.\n");
+
+       ret = dma_async_device_register(&pdma->dma_dev);
+       if (ret)
+               goto ERR_REG_DMADEVICE;
+
+       return 0;
+
+ERR_MEMBASE:
+       devm_kfree(&pdev->dev, pdma);
+       return PTR_ERR(pdma->membase);
+
+ERR_INITIRQ:
+       devm_kfree(&pdev->dev, pdma);
+       return ret;
+
+ERR_REG_DMADEVICE:
+       devm_kfree(&pdev->dev, pdma);
+       dev_err(&pdev->dev,
+               "Can't register SiFive Platform DMA. (%d)\n", ret);
+       return ret;
+}
+
+static int sf_pdma_remove(struct platform_device *pdev)
+{
+       struct sf_pdma *pdma = platform_get_drvdata(pdev);
+       struct sf_pdma_chan *ch;
+       int i;
+
+       for (i = 0; i < PDMA_NR_CH; i++) {
+               ch = &pdma->chans[i];
+
+               devm_free_irq(&pdev->dev, ch->txirq, ch);
+               devm_free_irq(&pdev->dev, ch->errirq, ch);
+               list_del(&ch->vchan.chan.device_node);
+               tasklet_kill(&ch->vchan.task);
+               tasklet_kill(&ch->done_tasklet);
+               tasklet_kill(&ch->err_tasklet);
+       }
+
+       dma_async_device_unregister(&pdma->dma_dev);
+
+       return 0;
+}
+
+static const struct of_device_id sf_pdma_dt_ids[] = {
+       { .compatible = "sifive,fu540-c000-pdma" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
+
+static struct platform_driver sf_pdma_driver = {
+       .probe          = sf_pdma_probe,
+       .remove         = sf_pdma_remove,
+       .driver         = {
+               .name   = "sf-pdma",
+               .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+       },
+};
+
+static int __init sf_pdma_init(void)
+{
+       return platform_driver_register(&sf_pdma_driver);
+}
+
+static void __exit sf_pdma_exit(void)
+{
+       platform_driver_unregister(&sf_pdma_driver);
+}
+
+/* do early init */
+subsys_initcall(sf_pdma_init);
+module_exit(sf_pdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SiFive Platform DMA driver");
+MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
new file mode 100644 (file)
index 0000000..55816c9
--- /dev/null
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * SiFive FU540 Platform DMA driver
+ * Copyright (C) 2019 SiFive
+ *
+ * Based partially on:
+ * - drivers/dma/fsl-edma.c
+ * - drivers/dma/dw-edma/
+ * - drivers/dma/pxa-dma.c
+ *
+ * See the following sources for further documentation:
+ * - Chapter 12 "Platform DMA Engine (PDMA)" of
+ *   SiFive FU540-C000 v1.0
+ *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
+ */
+#ifndef _SF_PDMA_H
+#define _SF_PDMA_H
+
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+#define PDMA_NR_CH                                     4
+
+#if (PDMA_NR_CH != 4)
+#error "Please define PDMA_NR_CH to 4"
+#endif
+
+#define PDMA_BASE_ADDR                                 0x3000000
+#define PDMA_CHAN_OFFSET                               0x1000
+
+/* Register Offset */
+#define PDMA_CTRL                                      0x000
+#define PDMA_XFER_TYPE                                 0x004
+#define PDMA_XFER_SIZE                                 0x008
+#define PDMA_DST_ADDR                                  0x010
+#define PDMA_SRC_ADDR                                  0x018
+#define PDMA_ACT_TYPE                                  0x104 /* Read-only */
+#define PDMA_REMAINING_BYTE                            0x108 /* Read-only */
+#define PDMA_CUR_DST_ADDR                              0x110 /* Read-only */
+#define PDMA_CUR_SRC_ADDR                              0x118 /* Read-only */
+
+/* CTRL */
+#define PDMA_CLEAR_CTRL                                        0x0
+#define PDMA_CLAIM_MASK                                        GENMASK(0, 0)
+#define PDMA_RUN_MASK                                  GENMASK(1, 1)
+#define PDMA_ENABLE_DONE_INT_MASK                      GENMASK(14, 14)
+#define PDMA_ENABLE_ERR_INT_MASK                       GENMASK(15, 15)
+#define PDMA_DONE_STATUS_MASK                          GENMASK(30, 30)
+#define PDMA_ERR_STATUS_MASK                           GENMASK(31, 31)
+
+/* Transfer Type */
+#define PDMA_FULL_SPEED                                        0xFF000008
+
+/* Error Recovery */
+#define MAX_RETRY                                      1
+
+struct pdma_regs {
+       /* read-write regs */
+       void __iomem *ctrl;             /* 4 bytes */
+
+       void __iomem *xfer_type;        /* 4 bytes */
+       void __iomem *xfer_size;        /* 8 bytes */
+       void __iomem *dst_addr;         /* 8 bytes */
+       void __iomem *src_addr;         /* 8 bytes */
+
+       /* read-only */
+       void __iomem *act_type;         /* 4 bytes */
+       void __iomem *residue;          /* 8 bytes */
+       void __iomem *cur_dst_addr;     /* 8 bytes */
+       void __iomem *cur_src_addr;     /* 8 bytes */
+};
+
+struct sf_pdma_desc {
+       u32                             xfer_type;
+       u64                             xfer_size;
+       u64                             dst_addr;
+       u64                             src_addr;
+       struct virt_dma_desc            vdesc;
+       struct sf_pdma_chan             *chan;
+       bool                            in_use;
+       enum dma_transfer_direction     dirn;
+       struct dma_async_tx_descriptor *async_tx;
+};
+
+enum sf_pdma_pm_state {
+       RUNNING = 0,
+       SUSPENDED,
+};
+
+struct sf_pdma_chan {
+       struct virt_dma_chan            vchan;
+       enum dma_status                 status;
+       enum sf_pdma_pm_state           pm_state;
+       u32                             slave_id;
+       struct sf_pdma                  *pdma;
+       struct sf_pdma_desc             *desc;
+       struct dma_slave_config         cfg;
+       u32                             attr;
+       dma_addr_t                      dma_dev_addr;
+       u32                             dma_dev_size;
+       struct tasklet_struct           done_tasklet;
+       struct tasklet_struct           err_tasklet;
+       struct pdma_regs                regs;
+       spinlock_t                      lock; /* protect chan data */
+       bool                            xfer_err;
+       int                             txirq;
+       int                             errirq;
+       int                             retries;
+};
+
+struct sf_pdma {
+       struct dma_device       dma_dev;
+       void __iomem            *membase;
+       void __iomem            *mappedbase;
+       u32                     n_chans;
+       struct sf_pdma_chan     chans[PDMA_NR_CH];
+};
+
+#endif /* _SF_PDMA_H */
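As a quick illustration of how the offsets above address an individual channel (mirroring SF_PDMA_REG_BASE() in sf-pdma.c; the foo_ name is illustrative only):

/*
 * Each channel occupies a PDMA_CHAN_OFFSET-sized window inside the
 * controller's MMIO region; a register lives at base + ch * window + offset.
 */
static void __iomem *foo_pdma_chan_reg(void __iomem *base, unsigned int ch,
					u32 offset)
{
	return base + PDMA_CHAN_OFFSET * ch + offset;
}

/* e.g. the residue register of channel 2:
 *	foo_pdma_chan_reg(base, 2, PDMA_REMAINING_BYTE)
 */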
index 3993ab65c62cd34469f755cfdcac52da98f96fef..f06016d38a05f284ffabd53d54e267300979bb06 100644 (file)
@@ -203,19 +203,27 @@ struct rcar_dmac {
 
        unsigned int n_channels;
        struct rcar_dmac_chan *channels;
-       unsigned int channels_mask;
+       u32 channels_mask;
 
        DECLARE_BITMAP(modules, 256);
 };
 
 #define to_rcar_dmac(d)                container_of(d, struct rcar_dmac, engine)
 
+/*
+ * struct rcar_dmac_of_data - This driver's OF data
+ * @chan_offset_base: DMAC channels base offset
+ * @chan_offset_stride: DMAC channels offset stride
+ */
+struct rcar_dmac_of_data {
+       u32 chan_offset_base;
+       u32 chan_offset_stride;
+};
+
 /* -----------------------------------------------------------------------------
  * Registers
  */
 
-#define RCAR_DMAC_CHAN_OFFSET(i)       (0x8000 + 0x80 * (i))
-
 #define RCAR_DMAISTA                   0x0020
 #define RCAR_DMASEC                    0x0030
 #define RCAR_DMAOR                     0x0060
@@ -1726,6 +1734,7 @@ static const struct dev_pm_ops rcar_dmac_pm = {
 
 static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
                                struct rcar_dmac_chan *rchan,
+                               const struct rcar_dmac_of_data *data,
                                unsigned int index)
 {
        struct platform_device *pdev = to_platform_device(dmac->dev);
@@ -1735,7 +1744,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
        int ret;
 
        rchan->index = index;
-       rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
+       rchan->iomem = dmac->iomem + data->chan_offset_base +
+                      data->chan_offset_stride * index;
        rchan->mid_rid = -EINVAL;
 
        spin_lock_init(&rchan->lock);
@@ -1800,7 +1810,15 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
                return -EINVAL;
        }
 
+       /*
+        * If the driver is unable to read the dma-channel-mask property,
+        * it assumes that all channels are usable.
+        */
        dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+       of_property_read_u32(np, "dma-channel-mask", &dmac->channels_mask);
+
+       /* If the property masks channels beyond n_channels, clear those bits */
+       dmac->channels_mask &= GENMASK(dmac->n_channels - 1, 0);
 
        return 0;
 }
@@ -1813,10 +1831,14 @@ static int rcar_dmac_probe(struct platform_device *pdev)
                DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
        struct dma_device *engine;
        struct rcar_dmac *dmac;
-       struct resource *mem;
+       const struct rcar_dmac_of_data *data;
        unsigned int i;
        int ret;
 
+       data = of_device_get_match_data(&pdev->dev);
+       if (!data)
+               return -EINVAL;
+
        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;
@@ -1848,8 +1870,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        /* Request resources. */
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+       dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dmac->iomem))
                return PTR_ERR(dmac->iomem);
 
@@ -1901,7 +1922,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
                if (!(dmac->channels_mask & BIT(i)))
                        continue;
 
-               ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
+               ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], data, i);
                if (ret < 0)
                        goto error;
        }
@@ -1948,8 +1969,16 @@ static void rcar_dmac_shutdown(struct platform_device *pdev)
        rcar_dmac_stop_all_chan(dmac);
 }
 
+static const struct rcar_dmac_of_data rcar_dmac_data = {
+       .chan_offset_base = 0x8000,
+       .chan_offset_stride = 0x80,
+};
+
 static const struct of_device_id rcar_dmac_of_ids[] = {
-       { .compatible = "renesas,rcar-dmac", },
+       {
+               .compatible = "renesas,rcar-dmac",
+               .data = &rcar_dmac_data,
+       },
        { /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
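To show why the channel offsets moved into match data: a future SoC could describe a different register layout purely through a new rcar_dmac_of_data instance. The second compatible string and its numbers below are invented for illustration:

/* Hypothetical: a DMAC whose channels start at offset 0 with a 0x1000 stride */
static const struct rcar_dmac_of_data foo_dmac_data = {
	.chan_offset_base   = 0x0,
	.chan_offset_stride = 0x1000,
};

static const struct of_device_id foo_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", .data = &rcar_dmac_data },
	{ .compatible = "vendor,foo-dmac",   .data = &foo_dmac_data },
	{ /* Sentinel */ }
};

/*
 * rcar_dmac_probe() picks the right layout with of_device_get_match_data(),
 * and rcar_dmac_chan_probe() maps channel i at
 * iomem + data->chan_offset_base + data->chan_offset_stride * i.
 */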
index 8546ad03472083d76156995158e860e106be028b..32402c2615ded458731439f182f50f143541ea52 100644 (file)
@@ -1080,7 +1080,6 @@ static int sprd_dma_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct sprd_dma_dev *sdev;
        struct sprd_dma_chn *dma_chn;
-       struct resource *res;
        u32 chn_count;
        int ret, i;
 
@@ -1126,8 +1125,7 @@ static int sprd_dma_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
        }
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
+       sdev->glb_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sdev->glb_base))
                return PTR_ERR(sdev->glb_base);
 
index ba7c4f07fcd6f8592cbe95e635b1091b376b9c8d..0ecfc2e1d798cdeab0aafef3f2d6fc5a275bcab2 100644 (file)
@@ -260,6 +260,13 @@ struct edma_cc {
         */
        unsigned long *slot_inuse;
 
+       /*
+        * Bitmap tracking channels reserved for use by the DSP.
+        * If a bit is cleared, the channel is allocated to the DSP and
+        * Linux must not touch it.
+        */
+       unsigned long *channels_mask;
+
        struct dma_device               dma_slave;
        struct dma_device               *dma_memcpy;
        struct edma_chan                *slave_chans;
@@ -716,6 +723,12 @@ static int edma_alloc_channel(struct edma_chan *echan,
        struct edma_cc *ecc = echan->ecc;
        int channel = EDMA_CHAN_SLOT(echan->ch_num);
 
+       if (!test_bit(echan->ch_num, ecc->channels_mask)) {
+               dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
+                       echan->ch_num);
+               return -EINVAL;
+       }
+
        /* ensure access through shadow region 0 */
        edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
                       EDMA_CHANNEL_BIT(channel));
@@ -2249,10 +2262,8 @@ static int edma_probe(struct platform_device *pdev)
 {
        struct edma_soc_info    *info = pdev->dev.platform_data;
        s8                      (*queue_priority_mapping)[2];
-       int                     i, off;
-       const s16               (*rsv_slots)[2];
-       const s16               (*xbar_chans)[2];
-       int                     irq;
+       const s16               (*reserved)[2];
+       int                     i, irq;
        char                    *irq_name;
        struct resource         *mem;
        struct device_node      *node = pdev->dev.of_node;
@@ -2331,15 +2342,32 @@ static int edma_probe(struct platform_device *pdev)
        if (!ecc->slot_inuse)
                return -ENOMEM;
 
+       ecc->channels_mask = devm_kcalloc(dev,
+                                          BITS_TO_LONGS(ecc->num_channels),
+                                          sizeof(unsigned long), GFP_KERNEL);
+       if (!ecc->channels_mask)
+               return -ENOMEM;
+
+       /* Mark all channels available initially */
+       bitmap_fill(ecc->channels_mask, ecc->num_channels);
+
        ecc->default_queue = info->default_queue;
 
        if (info->rsv) {
                /* Set the reserved slots in inuse list */
-               rsv_slots = info->rsv->rsv_slots;
-               if (rsv_slots) {
-                       for (i = 0; rsv_slots[i][0] != -1; i++)
-                               bitmap_set(ecc->slot_inuse, rsv_slots[i][0],
-                                          rsv_slots[i][1]);
+               reserved = info->rsv->rsv_slots;
+               if (reserved) {
+                       for (i = 0; reserved[i][0] != -1; i++)
+                               bitmap_set(ecc->slot_inuse, reserved[i][0],
+                                          reserved[i][1]);
+               }
+
+               /* Clear channels not usable for Linux */
+               reserved = info->rsv->rsv_chans;
+               if (reserved) {
+                       for (i = 0; reserved[i][0] != -1; i++)
+                               bitmap_clear(ecc->channels_mask, reserved[i][0],
+                                            reserved[i][1]);
                }
        }
 
@@ -2349,14 +2377,6 @@ static int edma_probe(struct platform_device *pdev)
                        edma_write_slot(ecc, i, &dummy_paramset);
        }
 
-       /* Clear the xbar mapped channels in unused list */
-       xbar_chans = info->xbar_chans;
-       if (xbar_chans) {
-               for (i = 0; xbar_chans[i][1] != -1; i++) {
-                       off = xbar_chans[i][1];
-               }
-       }
-
        irq = platform_get_irq_byname(pdev, "edma3_ccint");
        if (irq < 0 && node)
                irq = irq_of_parse_and_map(node, 0);
@@ -2399,6 +2419,7 @@ static int edma_probe(struct platform_device *pdev)
 
        if (!ecc->legacy_mode) {
                int lowest_priority = 0;
+               unsigned int array_max;
                struct of_phandle_args tc_args;
 
                ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
@@ -2420,6 +2441,18 @@ static int edma_probe(struct platform_device *pdev)
                                info->default_queue = i;
                        }
                }
+
+               /* See if we have optional dma-channel-mask array */
+               array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
+               ret = of_property_read_variable_u32_array(node,
+                                               "dma-channel-mask",
+                                               (u32 *)ecc->channels_mask,
+                                               1, array_max);
+               if (ret > 0 && ret != array_max)
+                       dev_warn(dev, "dma-channel-mask is not complete.\n");
+               else if (ret == -EOVERFLOW || ret == -ENODATA)
+                       dev_warn(dev,
+                                "dma-channel-mask is out of range or empty\n");
        }
 
        /* Event queue priority mapping */
@@ -2437,6 +2470,10 @@ static int edma_probe(struct platform_device *pdev)
        edma_dma_init(ecc, legacy_mode);
 
        for (i = 0; i < ecc->num_channels; i++) {
+               /* Do not touch reserved channels */
+               if (!test_bit(i, ecc->channels_mask))
+                       continue;
+
                /* Assign all channels to the default queue */
                edma_assign_channel_eventq(&ecc->slave_chans[i],
                                           info->default_queue);
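Condensed, the new dma-channel-mask handling in this probe follows the pattern below. This is a sketch; the foo_ name is illustrative, and the cast mirrors how the driver stores the u32 array straight into its bitmap:

#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/of.h>

static int foo_read_channel_mask(struct device_node *np, unsigned long *mask,
				 unsigned int num_channels)
{
	unsigned int array_max = DIV_ROUND_UP(num_channels, BITS_PER_TYPE(u32));
	int ret;

	/* Default: every channel is usable by Linux */
	bitmap_fill(mask, num_channels);

	/* Optional property; each u32 covers 32 channels, LSB = channel 0 */
	ret = of_property_read_variable_u32_array(np, "dma-channel-mask",
						  (u32 *)mask, 1, array_max);
	if (ret == -EINVAL)	/* property absent: keep the all-ones default */
		return 0;

	return ret < 0 ? ret : 0;
}

/* Reserved channels (cleared bits) are then skipped during setup:
 *	if (!test_bit(i, mask))
 *		continue;
 */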
index fde54687856b8340aee2b84b70777abc1a304d00..21b8f1131d550a9947c60c807e76437ca801e05f 100644 (file)
@@ -382,7 +382,6 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct uniphier_mdmac_device *mdev;
        struct dma_device *ddev;
-       struct resource *res;
        int nr_chans, ret, i;
 
        nr_chans = platform_irq_count(pdev);
@@ -398,8 +397,7 @@ static int uniphier_mdmac_probe(struct platform_device *pdev)
        if (!mdev)
                return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       mdev->reg_base = devm_ioremap_resource(dev, res);
+       mdev->reg_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(mdev->reg_base))
                return PTR_ERR(mdev->reg_base);
 
index 5d56f1e4d332ce5386b97efb0ec8063b1c659316..a9c5d5cc9f2bdba1f590d5dd92dc889f88c28e03 100644 (file)
  * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
  * Access (DMA) between a memory-mapped source address and a memory-mapped
  * destination address.
+ *
+ * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
+ * Xilinx IP that provides high-bandwidth direct memory access between
+ * memory and AXI4-Stream target peripherals. It provides a scatter-gather
+ * (SG) interface with independent configuration for multiple channels.
+ *
  */
 
 #include <linux/bitops.h>
 #define XILINX_DMA_NUM_DESCS           255
 #define XILINX_DMA_NUM_APP_WORDS       5
 
-/* Multi-Channel DMA Descriptor offsets*/
-#define XILINX_DMA_MCRX_CDESC(x)       (0x40 + (x-1) * 0x20)
-#define XILINX_DMA_MCRX_TDESC(x)       (0x48 + (x-1) * 0x20)
-
-/* Multi-Channel DMA Masks/Shifts */
-#define XILINX_DMA_BD_HSIZE_MASK       GENMASK(15, 0)
-#define XILINX_DMA_BD_STRIDE_MASK      GENMASK(15, 0)
-#define XILINX_DMA_BD_VSIZE_MASK       GENMASK(31, 19)
-#define XILINX_DMA_BD_TDEST_MASK       GENMASK(4, 0)
-#define XILINX_DMA_BD_STRIDE_SHIFT     0
-#define XILINX_DMA_BD_VSIZE_SHIFT      19
-
 /* AXI CDMA Specific Registers/Offsets */
 #define XILINX_CDMA_REG_SRCADDR                0x18
 #define XILINX_CDMA_REG_DSTADDR                0x20
 
 #define xilinx_prep_dma_addr_t(addr)   \
        ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
+
+/* AXI MCDMA Specific Registers/Offsets */
+#define XILINX_MCDMA_MM2S_CTRL_OFFSET          0x0000
+#define XILINX_MCDMA_S2MM_CTRL_OFFSET          0x0500
+#define XILINX_MCDMA_CHEN_OFFSET               0x0008
+#define XILINX_MCDMA_CH_ERR_OFFSET             0x0010
+#define XILINX_MCDMA_RXINT_SER_OFFSET          0x0020
+#define XILINX_MCDMA_TXINT_SER_OFFSET          0x0028
+#define XILINX_MCDMA_CHAN_CR_OFFSET(x)         (0x40 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_SR_OFFSET(x)         (0x44 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_CDESC_OFFSET(x)      (0x48 + (x) * 0x40)
+#define XILINX_MCDMA_CHAN_TDESC_OFFSET(x)      (0x50 + (x) * 0x40)
+
+/* AXI MCDMA Specific Masks/Shifts */
+#define XILINX_MCDMA_COALESCE_SHIFT            16
+#define XILINX_MCDMA_COALESCE_MAX              24
+#define XILINX_MCDMA_IRQ_ALL_MASK              GENMASK(7, 5)
+#define XILINX_MCDMA_COALESCE_MASK             GENMASK(23, 16)
+#define XILINX_MCDMA_CR_RUNSTOP_MASK           BIT(0)
+#define XILINX_MCDMA_IRQ_IOC_MASK              BIT(5)
+#define XILINX_MCDMA_IRQ_DELAY_MASK            BIT(6)
+#define XILINX_MCDMA_IRQ_ERR_MASK              BIT(7)
+#define XILINX_MCDMA_BD_EOP                    BIT(30)
+#define XILINX_MCDMA_BD_SOP                    BIT(31)
+
 /**
  * struct xilinx_vdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
@@ -221,8 +240,8 @@ struct xilinx_vdma_desc_hw {
  * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
  * @buf_addr: Buffer address @0x08
  * @buf_addr_msb: MSB of Buffer address @0x0C
- * @mcdma_control: Control field for mcdma @0x10
- * @vsize_stride: Vsize and Stride field for mcdma @0x14
+ * @reserved1: Reserved @0x10
+ * @reserved2: Reserved @0x14
  * @control: Control field @0x18
  * @status: Status field @0x1C
  * @app: APP Fields @0x20 - 0x30
@@ -232,13 +251,37 @@ struct xilinx_axidma_desc_hw {
        u32 next_desc_msb;
        u32 buf_addr;
        u32 buf_addr_msb;
-       u32 mcdma_control;
-       u32 vsize_stride;
+       u32 reserved1;
+       u32 reserved2;
        u32 control;
        u32 status;
        u32 app[XILINX_DMA_NUM_APP_WORDS];
 } __aligned(64);
 
+/**
+ * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
+ * @buf_addr: Buffer address @0x08
+ * @buf_addr_msb: MSB of Buffer address @0x0C
+ * @rsvd: Reserved field @0x10
+ * @control: Control Information field @0x14
+ * @status: Status field @0x18
+ * @sideband_status: Status of sideband signals @0x1C
+ * @app: APP Fields @0x20 - 0x30
+ */
+struct xilinx_aximcdma_desc_hw {
+       u32 next_desc;
+       u32 next_desc_msb;
+       u32 buf_addr;
+       u32 buf_addr_msb;
+       u32 rsvd;
+       u32 control;
+       u32 status;
+       u32 sideband_status;
+       u32 app[XILINX_DMA_NUM_APP_WORDS];
+} __aligned(64);
+
 /**
  * struct xilinx_cdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
@@ -285,6 +328,18 @@ struct xilinx_axidma_tx_segment {
        dma_addr_t phys;
 } __aligned(64);
 
+/**
+ * struct xilinx_aximcdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_aximcdma_tx_segment {
+       struct xilinx_aximcdma_desc_hw hw;
+       struct list_head node;
+       dma_addr_t phys;
+} __aligned(64);
+
 /**
  * struct xilinx_cdma_tx_segment - Descriptor segment
  * @hw: Hardware descriptor
@@ -303,12 +358,16 @@ struct xilinx_cdma_tx_segment {
  * @segments: TX segments list
  * @node: Node in the channel descriptors list
  * @cyclic: Check for cyclic transfers.
+ * @err: Whether the descriptor has an error.
+ * @residue: Residue of the completed descriptor
  */
 struct xilinx_dma_tx_descriptor {
        struct dma_async_tx_descriptor async_tx;
        struct list_head segments;
        struct list_head node;
        bool cyclic;
+       bool err;
+       u32 residue;
 };
 
 /**
@@ -339,8 +398,8 @@ struct xilinx_dma_tx_descriptor {
  * @desc_pendingcount: Descriptor pending count
  * @ext_addr: Indicates 64 bit addressing is supported by dma channel
  * @desc_submitcount: Descriptor h/w submitted count
- * @residue: Residue for AXI DMA
  * @seg_v: Statically allocated segments base
+ * @seg_mv: Statically allocated segments base for MCDMA
  * @seg_p: Physical allocated segments base
  * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
  * @cyclic_seg_p: Physical allocated segments base for cyclic dma
@@ -376,8 +435,8 @@ struct xilinx_dma_chan {
        u32 desc_pendingcount;
        bool ext_addr;
        u32 desc_submitcount;
-       u32 residue;
        struct xilinx_axidma_tx_segment *seg_v;
+       struct xilinx_aximcdma_tx_segment *seg_mv;
        dma_addr_t seg_p;
        struct xilinx_axidma_tx_segment *cyclic_seg_v;
        dma_addr_t cyclic_seg_p;
@@ -393,12 +452,14 @@ struct xilinx_dma_chan {
  * @XDMA_TYPE_AXIDMA: Axi dma ip.
  * @XDMA_TYPE_CDMA: Axi cdma ip.
  * @XDMA_TYPE_VDMA: Axi vdma ip.
+ * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
  *
  */
 enum xdma_ip_type {
        XDMA_TYPE_AXIDMA = 0,
        XDMA_TYPE_CDMA,
        XDMA_TYPE_VDMA,
+       XDMA_TYPE_AXIMCDMA
 };
 
 struct xilinx_dma_config {
@@ -406,6 +467,7 @@ struct xilinx_dma_config {
        int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
                        struct clk **tx_clk, struct clk **txs_clk,
                        struct clk **rx_clk, struct clk **rxs_clk);
+       irqreturn_t (*irq_handler)(int irq, void *data);
 };
 
 /**
@@ -414,7 +476,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
  * @pdev: Platform device structure pointer
@@ -427,13 +488,13 @@ struct xilinx_dma_config {
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
  * @max_buffer_len: Max buffer length
+ * @s2mm_index: S2MM channel index
  */
 struct xilinx_dma_device {
        void __iomem *regs;
        struct device *dev;
        struct dma_device common;
        struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-       bool mcdma;
        u32 flush_on_fsync;
        bool ext_addr;
        struct platform_device  *pdev;
@@ -446,6 +507,7 @@ struct xilinx_dma_device {
        u32 nr_channels;
        u32 chan_id;
        u32 max_buffer_len;
+       u32 s2mm_index;
 };
 
 /* Macros */
@@ -546,6 +608,18 @@ static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
        }
 }
 
+static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
+                                      struct xilinx_aximcdma_desc_hw *hw,
+                                      dma_addr_t buf_addr, size_t sg_used)
+{
+       if (chan->ext_addr) {
+               hw->buf_addr = lower_32_bits(buf_addr + sg_used);
+               hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
+       } else {
+               hw->buf_addr = buf_addr + sg_used;
+       }
+}
+
 /* -----------------------------------------------------------------------------
  * Descriptors and segments alloc and free
  */
@@ -613,6 +687,33 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
        }
        spin_unlock_irqrestore(&chan->lock, flags);
 
+       if (!segment)
+               dev_dbg(chan->dev, "Could not find free tx segment\n");
+
+       return segment;
+}
+
+/**
+ * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_aximcdma_tx_segment *
+xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_aximcdma_tx_segment *segment = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&chan->lock, flags);
+       if (!list_empty(&chan->free_seg_list)) {
+               segment = list_first_entry(&chan->free_seg_list,
+                                          struct xilinx_aximcdma_tx_segment,
+                                          node);
+               list_del(&segment->node);
+       }
+       spin_unlock_irqrestore(&chan->lock, flags);
+
        return segment;
 }
 
@@ -627,6 +728,17 @@ static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
        hw->next_desc_msb = next_desc_msb;
 }
 
+static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
+{
+       u32 next_desc = hw->next_desc;
+       u32 next_desc_msb = hw->next_desc_msb;
+
+       memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));
+
+       hw->next_desc = next_desc;
+       hw->next_desc_msb = next_desc_msb;
+}
+
 /**
  * xilinx_dma_free_tx_segment - Free transaction segment
  * @chan: Driver specific DMA channel
@@ -640,6 +752,20 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
        list_add_tail(&segment->node, &chan->free_seg_list);
 }
 
+/**
+ * xilinx_mcdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
+                                        struct xilinx_aximcdma_tx_segment *
+                                        segment)
+{
+       xilinx_mcdma_clean_hw_desc(&segment->hw);
+
+       list_add_tail(&segment->node, &chan->free_seg_list);
+}
+
 /**
  * xilinx_cdma_free_tx_segment - Free transaction segment
  * @chan: Driver specific DMA channel
@@ -694,6 +820,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
        struct xilinx_vdma_tx_segment *segment, *next;
        struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
        struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
+       struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;
 
        if (!desc)
                return;
@@ -709,12 +836,18 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
                        list_del(&cdma_segment->node);
                        xilinx_cdma_free_tx_segment(chan, cdma_segment);
                }
-       } else {
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                list_for_each_entry_safe(axidma_segment, axidma_next,
                                         &desc->segments, node) {
                        list_del(&axidma_segment->node);
                        xilinx_dma_free_tx_segment(chan, axidma_segment);
                }
+       } else {
+               list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
+                                        &desc->segments, node) {
+                       list_del(&aximcdma_segment->node);
+                       xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
+               }
        }
 
        kfree(desc);
@@ -783,10 +916,61 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
                                  chan->cyclic_seg_v, chan->cyclic_seg_p);
        }
 
-       if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
+       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+               spin_lock_irqsave(&chan->lock, flags);
+               INIT_LIST_HEAD(&chan->free_seg_list);
+               spin_unlock_irqrestore(&chan->lock, flags);
+
+               /* Free memory that is allocated for BD */
+               dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
+                                 XILINX_DMA_NUM_DESCS, chan->seg_mv,
+                                 chan->seg_p);
+       }
+
+       if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
+           chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
                dma_pool_destroy(chan->desc_pool);
                chan->desc_pool = NULL;
        }
+
+}
+
+/**
+ * xilinx_dma_get_residue - Compute residue for a given descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ *
+ * Return: The number of residue bytes for the descriptor.
+ */
+static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
+                                 struct xilinx_dma_tx_descriptor *desc)
+{
+       struct xilinx_cdma_tx_segment *cdma_seg;
+       struct xilinx_axidma_tx_segment *axidma_seg;
+       struct xilinx_cdma_desc_hw *cdma_hw;
+       struct xilinx_axidma_desc_hw *axidma_hw;
+       struct list_head *entry;
+       u32 residue = 0;
+
+       list_for_each(entry, &desc->segments) {
+               if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
+                       cdma_seg = list_entry(entry,
+                                             struct xilinx_cdma_tx_segment,
+                                             node);
+                       cdma_hw = &cdma_seg->hw;
+                       residue += (cdma_hw->control - cdma_hw->status) &
+                                  chan->xdev->max_buffer_len;
+               } else {
+                       axidma_seg = list_entry(entry,
+                                               struct xilinx_axidma_tx_segment,
+                                               node);
+                       axidma_hw = &axidma_seg->hw;
+                       residue += (axidma_hw->control - axidma_hw->status) &
+                                  chan->xdev->max_buffer_len;
+               }
+       }
+
+       return residue;
 }
 
 /**
@@ -823,7 +1007,7 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
        spin_lock_irqsave(&chan->lock, flags);
 
        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
-               struct dmaengine_desc_callback cb;
+               struct dmaengine_result result;
 
                if (desc->cyclic) {
                        xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
@@ -833,14 +1017,22 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
                /* Remove from the list of running transactions */
                list_del(&desc->node);
 
-               /* Run the link descriptor callback function */
-               dmaengine_desc_get_callback(&desc->async_tx, &cb);
-               if (dmaengine_desc_callback_valid(&cb)) {
-                       spin_unlock_irqrestore(&chan->lock, flags);
-                       dmaengine_desc_callback_invoke(&cb, NULL);
-                       spin_lock_irqsave(&chan->lock, flags);
+               if (unlikely(desc->err)) {
+                       if (chan->direction == DMA_DEV_TO_MEM)
+                               result.result = DMA_TRANS_READ_FAILED;
+                       else
+                               result.result = DMA_TRANS_WRITE_FAILED;
+               } else {
+                       result.result = DMA_TRANS_NOERROR;
                }
 
+               result.residue = desc->residue;
+
+               /* Run the link descriptor callback function */
+               spin_unlock_irqrestore(&chan->lock, flags);
+               dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
+               spin_lock_irqsave(&chan->lock, flags);
+
                /* Run any dependencies, then free the descriptor */
                dma_run_dependencies(&desc->async_tx);
                xilinx_dma_free_tx_descriptor(chan, desc);
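With the cleanup path now filling in a struct dmaengine_result, a client can observe per-descriptor errors and residue from its completion callback. A minimal consumer sketch (foo_ names hypothetical):

#include <linux/completion.h>
#include <linux/dmaengine.h>

static void foo_dma_done(void *param, const struct dmaengine_result *result)
{
	struct completion *done = param;

	if (result->result != DMA_TRANS_NOERROR)
		pr_err("DMA transfer failed, residue %u bytes\n",
		       result->residue);

	complete(done);
}

/* When submitting a descriptor:
 *	tx->callback_result = foo_dma_done;
 *	tx->callback_param  = &done;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	wait_for_completion(&done);
 */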
@@ -922,6 +1114,30 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
                        list_add_tail(&chan->seg_v[i].node,
                                      &chan->free_seg_list);
                }
+       } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+               /* Allocate the buffer descriptors. */
+               chan->seg_mv = dma_alloc_coherent(chan->dev,
+                                                 sizeof(*chan->seg_mv) *
+                                                 XILINX_DMA_NUM_DESCS,
+                                                 &chan->seg_p, GFP_KERNEL);
+               if (!chan->seg_mv) {
+                       dev_err(chan->dev,
+                               "unable to allocate channel %d descriptors\n",
+                               chan->id);
+                       return -ENOMEM;
+               }
+               for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
+                       chan->seg_mv[i].hw.next_desc =
+                       lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+                               ((i + 1) % XILINX_DMA_NUM_DESCS));
+                       chan->seg_mv[i].hw.next_desc_msb =
+                       upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
+                               ((i + 1) % XILINX_DMA_NUM_DESCS));
+                       chan->seg_mv[i].phys = chan->seg_p +
+                               sizeof(*chan->seg_mv) * i;
+                       list_add_tail(&chan->seg_mv[i].node,
+                                     &chan->free_seg_list);
+               }
        } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
                                   chan->dev,
@@ -937,7 +1153,8 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
        }
 
        if (!chan->desc_pool &&
-           (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
+           ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
+               chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
                dev_err(chan->dev,
                        "unable to allocate channel %d descriptor pool\n",
                        chan->id);
@@ -1003,8 +1220,6 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 {
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_axidma_tx_segment *segment;
-       struct xilinx_axidma_desc_hw *hw;
        enum dma_status ret;
        unsigned long flags;
        u32 residue = 0;
@@ -1013,23 +1228,20 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
        if (ret == DMA_COMPLETE || !txstate)
                return ret;
 
-       if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               spin_lock_irqsave(&chan->lock, flags);
+       spin_lock_irqsave(&chan->lock, flags);
 
-               desc = list_last_entry(&chan->active_list,
-                                      struct xilinx_dma_tx_descriptor, node);
-               if (chan->has_sg) {
-                       list_for_each_entry(segment, &desc->segments, node) {
-                               hw = &segment->hw;
-                               residue += (hw->control - hw->status) &
-                                          chan->xdev->max_buffer_len;
-                       }
-               }
-               spin_unlock_irqrestore(&chan->lock, flags);
+       desc = list_last_entry(&chan->active_list,
+                              struct xilinx_dma_tx_descriptor, node);
+       /*
+        * VDMA and simple mode do not support residue reporting, so the
+        * residue field will always be 0.
+        */
+       if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+               residue = xilinx_dma_get_residue(chan, desc);
 
-               chan->residue = residue;
-               dma_set_residue(txstate, chan->residue);
-       }
+       spin_unlock_irqrestore(&chan->lock, flags);
+
+       dma_set_residue(txstate, residue);
 
        return ret;
 }
@@ -1301,53 +1513,23 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
                dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
        }
 
-       if (chan->has_sg && !chan->xdev->mcdma)
+       if (chan->has_sg)
                xilinx_write(chan, XILINX_DMA_REG_CURDESC,
                             head_desc->async_tx.phys);
 
-       if (chan->has_sg && chan->xdev->mcdma) {
-               if (chan->direction == DMA_MEM_TO_DEV) {
-                       dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-                                      head_desc->async_tx.phys);
-               } else {
-                       if (!chan->tdest) {
-                               dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-                                      head_desc->async_tx.phys);
-                       } else {
-                               dma_ctrl_write(chan,
-                                       XILINX_DMA_MCRX_CDESC(chan->tdest),
-                                      head_desc->async_tx.phys);
-                       }
-               }
-       }
-
        xilinx_dma_start(chan);
 
        if (chan->err)
                return;
 
        /* Start the transfer */
-       if (chan->has_sg && !chan->xdev->mcdma) {
+       if (chan->has_sg) {
                if (chan->cyclic)
                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
                                     chan->cyclic_seg_v->phys);
                else
                        xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
                                     tail_segment->phys);
-       } else if (chan->has_sg && chan->xdev->mcdma) {
-               if (chan->direction == DMA_MEM_TO_DEV) {
-                       dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                              tail_segment->phys);
-               } else {
-                       if (!chan->tdest) {
-                               dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-                                              tail_segment->phys);
-                       } else {
-                               dma_ctrl_write(chan,
-                                       XILINX_DMA_MCRX_TDESC(chan->tdest),
-                                       tail_segment->phys);
-                       }
-               }
        } else {
                struct xilinx_axidma_tx_segment *segment;
                struct xilinx_axidma_desc_hw *hw;
@@ -1370,6 +1552,76 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
        chan->idle = false;
 }
 
+/**
+ * xilinx_mcdma_start_transfer - Starts MCDMA transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+       struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+       struct xilinx_axidma_tx_segment *tail_segment;
+       u32 reg;
+
+       /*
+        * The lock is already held by the calling functions, so we don't
+        * need to take it here again.
+        */
+
+       if (chan->err)
+               return;
+
+       if (!chan->idle)
+               return;
+
+       if (list_empty(&chan->pending_list))
+               return;
+
+       head_desc = list_first_entry(&chan->pending_list,
+                                    struct xilinx_dma_tx_descriptor, node);
+       tail_desc = list_last_entry(&chan->pending_list,
+                                   struct xilinx_dma_tx_descriptor, node);
+       tail_segment = list_last_entry(&tail_desc->segments,
+                                      struct xilinx_axidma_tx_segment, node);
+
+       reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+
+       if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
+               reg &= ~XILINX_MCDMA_COALESCE_MASK;
+               reg |= chan->desc_pendingcount <<
+                       XILINX_MCDMA_COALESCE_SHIFT;
+       }
+
+       reg |= XILINX_MCDMA_IRQ_ALL_MASK;
+       dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+       /* Program current descriptor */
+       xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
+                    head_desc->async_tx.phys);
+
+       /* Program channel enable register */
+       reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
+       reg |= BIT(chan->tdest);
+       dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
+
+       /* Start the fetch of BDs for the channel */
+       reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
+       reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
+       dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
+
+       xilinx_dma_start(chan);
+
+       if (chan->err)
+               return;
+
+       /* Start the transfer */
+       xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
+                    tail_segment->phys);
+
+       list_splice_tail_init(&chan->pending_list, &chan->active_list);
+       chan->desc_pendingcount = 0;
+       chan->idle = false;
+}
+
 /**
  * xilinx_dma_issue_pending - Issue pending transactions
  * @dchan: DMA channel
@@ -1399,6 +1651,13 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
                return;
 
        list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+               if (chan->has_sg && chan->xdev->dma_config->dmatype !=
+                   XDMA_TYPE_VDMA)
+                       desc->residue = xilinx_dma_get_residue(chan, desc);
+               else
+                       desc->residue = 0;
+               desc->err = chan->err;
+
                list_del(&desc->node);
                if (!desc->cyclic)
                        dma_cookie_complete(&desc->async_tx);
@@ -1433,6 +1692,7 @@ static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
 
        chan->err = false;
        chan->idle = true;
+       chan->desc_pendingcount = 0;
        chan->desc_submitcount = 0;
 
        return err;
@@ -1460,6 +1720,74 @@ static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
        return 0;
 }
 
+/**
+ * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
+ * @irq: IRQ number
+ * @data: Pointer to the Xilinx MCDMA channel structure
+ *
+ * Return: IRQ_HANDLED/IRQ_NONE
+ */
+static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
+{
+       struct xilinx_dma_chan *chan = data;
+       u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
+
+       if (chan->direction == DMA_DEV_TO_MEM)
+               ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
+       else
+               ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
+
+       /* Read the channel ID that raised the interrupt */
+       chan_sermask = dma_ctrl_read(chan, ser_offset);
+       chan_id = ffs(chan_sermask);
+
+       if (!chan_id)
+               return IRQ_NONE;
+
+       if (chan->direction == DMA_DEV_TO_MEM)
+               chan_offset = chan->xdev->s2mm_index;
+
+       chan_offset = chan_offset + (chan_id - 1);
+       chan = chan->xdev->chan[chan_offset];
+       /* Read the status and ack the interrupts. */
+       status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
+       if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
+               return IRQ_NONE;
+
+       dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
+                      status & XILINX_MCDMA_IRQ_ALL_MASK);
+
+       if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
+               dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
+                       chan,
+                       dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
+                       dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
+                                     (chan->tdest)),
+                       dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
+                                     (chan->tdest)));
+               chan->err = true;
+       }
+
+       if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
+               /*
+                * The device takes too long to do the transfer when the user
+                * requires responsiveness.
+                */
+               dev_dbg(chan->dev, "Inter-packet latency too long\n");
+       }
+
+       if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
+               spin_lock(&chan->lock);
+               xilinx_dma_complete_descriptor(chan);
+               chan->idle = true;
+               chan->start_transfer(chan);
+               spin_unlock(&chan->lock);
+       }
+
+       tasklet_schedule(&chan->tasklet);
+       return IRQ_HANDLED;
+}
+
 /**
  * xilinx_dma_irq_handler - DMA Interrupt handler
  * @irq: IRQ number
@@ -1967,31 +2295,32 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 }
 
 /**
- * xilinx_dma_prep_interleaved - prepare a descriptor for a
- *     DMA_SLAVE transaction
+ * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  * @dchan: DMA channel
- * @xt: Interleaved template pointer
+ * @sgl: scatterlist to transfer to/from
+ * @sg_len: number of entries in @sgl
+ * @direction: DMA direction
  * @flags: transfer ack flags
+ * @context: APP words of the descriptor
  *
  * Return: Async transaction descriptor on success and NULL on failure
  */
 static struct dma_async_tx_descriptor *
-xilinx_dma_prep_interleaved(struct dma_chan *dchan,
-                                struct dma_interleaved_template *xt,
-                                unsigned long flags)
+xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+                          unsigned int sg_len,
+                          enum dma_transfer_direction direction,
+                          unsigned long flags, void *context)
 {
        struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
        struct xilinx_dma_tx_descriptor *desc;
-       struct xilinx_axidma_tx_segment *segment;
-       struct xilinx_axidma_desc_hw *hw;
-
-       if (!is_slave_direction(xt->dir))
-               return NULL;
-
-       if (!xt->numf || !xt->sgl[0].size)
-               return NULL;
+       struct xilinx_aximcdma_tx_segment *segment = NULL;
+       u32 *app_w = (u32 *)context;
+       struct scatterlist *sg;
+       size_t copy;
+       size_t sg_used;
+       unsigned int i;
 
-       if (xt->frame_size != 1)
+       if (!is_slave_direction(direction))
                return NULL;
 
        /* Allocate a transaction descriptor. */
@@ -1999,54 +2328,67 @@ xilinx_dma_prep_interleaved(struct dma_chan *dchan,
        if (!desc)
                return NULL;
 
-       chan->direction = xt->dir;
        dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
        desc->async_tx.tx_submit = xilinx_dma_tx_submit;
 
-       /* Get a free segment */
-       segment = xilinx_axidma_alloc_tx_segment(chan);
-       if (!segment)
-               goto error;
+       /* Build transactions using information in the scatter gather list */
+       for_each_sg(sgl, sg, sg_len, i) {
+               sg_used = 0;
 
-       hw = &segment->hw;
+               /* Loop until the entire scatterlist entry is used */
+               while (sg_used < sg_dma_len(sg)) {
+                       struct xilinx_aximcdma_desc_hw *hw;
 
-       /* Fill in the descriptor */
-       if (xt->dir != DMA_MEM_TO_DEV)
-               hw->buf_addr = xt->dst_start;
-       else
-               hw->buf_addr = xt->src_start;
+                       /* Get a free segment */
+                       segment = xilinx_aximcdma_alloc_tx_segment(chan);
+                       if (!segment)
+                               goto error;
 
-       hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
-       hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
-                           XILINX_DMA_BD_VSIZE_MASK;
-       hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
-                           XILINX_DMA_BD_STRIDE_MASK;
-       hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
+                       /*
+                        * Calculate the maximum number of bytes to transfer,
+                        * making sure it is less than the hw limit
+                        */
+                       copy = min_t(size_t, sg_dma_len(sg) - sg_used,
+                                    chan->xdev->max_buffer_len);
+                       hw = &segment->hw;
 
-       /*
-        * Insert the segment into the descriptor segments
-        * list.
-        */
-       list_add_tail(&segment->node, &desc->segments);
+                       /* Fill in the descriptor */
+                       xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
+                                           sg_used);
+                       hw->control = copy;
 
+                       if (chan->direction == DMA_MEM_TO_DEV && app_w) {
+                               memcpy(hw->app, app_w, sizeof(u32) *
+                                      XILINX_DMA_NUM_APP_WORDS);
+                       }
+
+                       sg_used += copy;
+                       /*
+                        * Insert the segment into the descriptor segments
+                        * list.
+                        */
+                       list_add_tail(&segment->node, &desc->segments);
+               }
+       }
 
        segment = list_first_entry(&desc->segments,
-                                  struct xilinx_axidma_tx_segment, node);
+                                  struct xilinx_aximcdma_tx_segment, node);
        desc->async_tx.phys = segment->phys;
 
        /* For the last DMA_MEM_TO_DEV transfer, set EOP */
-       if (xt->dir == DMA_MEM_TO_DEV) {
-               segment->hw.control |= XILINX_DMA_BD_SOP;
+       if (chan->direction == DMA_MEM_TO_DEV) {
+               segment->hw.control |= XILINX_MCDMA_BD_SOP;
                segment = list_last_entry(&desc->segments,
-                                         struct xilinx_axidma_tx_segment,
+                                         struct xilinx_aximcdma_tx_segment,
                                          node);
-               segment->hw.control |= XILINX_DMA_BD_EOP;
+               segment->hw.control |= XILINX_MCDMA_BD_EOP;
        }
 
        return &desc->async_tx;
 
 error:
        xilinx_dma_free_tx_descriptor(chan, desc);
+
        return NULL;
 }
 
@@ -2194,7 +2536,9 @@ static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
        if (IS_ERR(*axi_clk)) {
                err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+               if (err != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+                               err);
                return err;
        }
 
@@ -2259,14 +2603,18 @@ static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
        if (IS_ERR(*axi_clk)) {
                err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
+               if (err != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to get axi_clk (%d)\n",
+                               err);
                return err;
        }
 
        *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
        if (IS_ERR(*dev_clk)) {
                err = PTR_ERR(*dev_clk);
-               dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
+               if (err != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to get dev_clk (%d)\n",
+                               err);
                return err;
        }
 
@@ -2299,7 +2647,9 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
        *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
        if (IS_ERR(*axi_clk)) {
                err = PTR_ERR(*axi_clk);
-               dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
+               if (err != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n",
+                               err);
                return err;
        }
 
@@ -2321,7 +2671,8 @@ static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
 
        err = clk_prepare_enable(*axi_clk);
        if (err) {
-               dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
+               dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
+                       err);
                return err;
        }
 
@@ -2454,6 +2805,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
                                           "xlnx,axi-dma-s2mm-channel")) {
                chan->direction = DMA_DEV_TO_MEM;
                chan->id = chan_id;
+               xdev->s2mm_index = xdev->nr_channels;
                chan->tdest = chan_id - xdev->nr_channels;
                chan->has_vflip = of_property_read_bool(node,
                                        "xlnx,enable-vert-flip");
@@ -2463,7 +2815,11 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
                                XILINX_VDMA_ENABLE_VERTICAL_FLIP;
                }
 
-               chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+               if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+                       chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
+               else
+                       chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
+
                if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
                        chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
                        chan->config.park = 1;
@@ -2478,9 +2834,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
        }
 
        /* Request the interrupt */
-       chan->irq = irq_of_parse_and_map(node, 0);
-       err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
-                         "xilinx-dma-controller", chan);
+       chan->irq = irq_of_parse_and_map(node, chan->tdest);
+       err = request_irq(chan->irq, xdev->dma_config->irq_handler,
+                         IRQF_SHARED, "xilinx-dma-controller", chan);
        if (err) {
                dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
                return err;
@@ -2489,6 +2845,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
        if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
                chan->start_transfer = xilinx_dma_start_transfer;
                chan->stop_transfer = xilinx_dma_stop_transfer;
+       } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+               chan->start_transfer = xilinx_mcdma_start_transfer;
+               chan->stop_transfer = xilinx_dma_stop_transfer;
        } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                chan->start_transfer = xilinx_cdma_start_transfer;
                chan->stop_transfer = xilinx_cdma_stop_transfer;
@@ -2545,7 +2904,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
        int ret, i, nr_channels = 1;
 
        ret = of_property_read_u32(node, "dma-channels", &nr_channels);
-       if ((ret < 0) && xdev->mcdma)
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
                dev_warn(xdev->dev, "missing dma-channels property\n");
 
        for (i = 0; i < nr_channels; i++)
@@ -2578,22 +2937,31 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 static const struct xilinx_dma_config axidma_config = {
        .dmatype = XDMA_TYPE_AXIDMA,
        .clk_init = axidma_clk_init,
+       .irq_handler = xilinx_dma_irq_handler,
 };
 
+static const struct xilinx_dma_config aximcdma_config = {
+       .dmatype = XDMA_TYPE_AXIMCDMA,
+       .clk_init = axidma_clk_init,
+       .irq_handler = xilinx_mcdma_irq_handler,
+};
 static const struct xilinx_dma_config axicdma_config = {
        .dmatype = XDMA_TYPE_CDMA,
        .clk_init = axicdma_clk_init,
+       .irq_handler = xilinx_dma_irq_handler,
 };
 
 static const struct xilinx_dma_config axivdma_config = {
        .dmatype = XDMA_TYPE_VDMA,
        .clk_init = axivdma_clk_init,
+       .irq_handler = xilinx_dma_irq_handler,
 };
 
 static const struct of_device_id xilinx_dma_of_ids[] = {
        { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
        { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
        { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
+       { .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
        {}
 };
 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -2612,7 +2980,6 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        struct device_node *node = pdev->dev.of_node;
        struct xilinx_dma_device *xdev;
        struct device_node *child, *np = pdev->dev.of_node;
-       struct resource *io;
        u32 num_frames, addr_width, len_width;
        int i, err;
 
@@ -2638,16 +3005,15 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                return err;
 
        /* Request and map I/O memory */
-       io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       xdev->regs = devm_ioremap_resource(&pdev->dev, io);
+       xdev->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(xdev->regs))
                return PTR_ERR(xdev->regs);
 
        /* Retrieve the DMA engine properties from the device tree */
        xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
 
-       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
-               xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+       if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
+           xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
                if (!of_property_read_u32(node, "xlnx,sg-length-width",
                                          &len_width)) {
                        if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
@@ -2712,14 +3078,17 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
                xdev->common.device_prep_dma_cyclic =
                                          xilinx_dma_prep_dma_cyclic;
-               xdev->common.device_prep_interleaved_dma =
-                                       xilinx_dma_prep_interleaved;
-               /* Residue calculation is supported by only AXI DMA */
+               /* Residue calculation is supported only by AXI DMA and CDMA */
                xdev->common.residue_granularity =
                                          DMA_RESIDUE_GRANULARITY_SEGMENT;
        } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
                dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
                xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+               /* Residue calculation is supported only by AXI DMA and CDMA */
+               xdev->common.residue_granularity =
+                                         DMA_RESIDUE_GRANULARITY_SEGMENT;
+       } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
+               xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
        } else {
                xdev->common.device_prep_interleaved_dma =
                                xilinx_vdma_dma_prep_interleaved;
@@ -2755,6 +3124,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
                dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
        else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
                dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
+       else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
+               dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
        else
                dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");
 
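
The probe-path changes above add the "xlnx,axi-mcdma-1.00.a" compatible, read a dma-channels property from each channel child node, and map one interrupt per channel via chan->tdest. A rough, hypothetical device-tree sketch of what that implies is shown below; it is assembled only from the properties visible in this diff, plus an assumed #dma-cells value and the mm2s child compatible from the existing binding, so it is not a complete binding example (xilinx_dma.txt remains the authoritative reference). Addresses, clocks and interrupt numbers are placeholders.

	/* Hypothetical AXI MCDMA node; all values are illustrative only. */
	axi_mcdma: dma-controller@41e00000 {
		compatible = "xlnx,axi-mcdma-1.00.a";
		reg = <0x41e00000 0x1000>;
		clocks = <&clkc 15>;
		clock-names = "s_axi_lite_aclk";
		#dma-cells = <1>;	/* assumed, as for the other AXI DMA variants */

		dma-channel@41e00000 {
			compatible = "xlnx,axi-dma-mm2s-channel";
			dma-channels = <2>;
			/* one interrupt per channel, looked up by chan->tdest */
			interrupts = <0 29 4>, <0 30 4>;
		};

		dma-channel@41e00030 {
			compatible = "xlnx,axi-dma-s2mm-channel";
			dma-channels = <2>;
			interrupts = <0 31 4>, <0 32 4>;
		};
	};
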
index 9f4436f7c9149621b6b0db6e308456bbdaff950d..5fe2e8b9a7b80a63f1b34f8d96138c388b04db63 100644 (file)
@@ -754,18 +754,13 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
 static int zx_dma_probe(struct platform_device *op)
 {
        struct zx_dma_dev *d;
-       struct resource *iores;
        int i, ret = 0;
 
-       iores = platform_get_resource(op, IORESOURCE_MEM, 0);
-       if (!iores)
-               return -EINVAL;
-
        d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;
 
-       d->base = devm_ioremap_resource(&op->dev, iores);
+       d->base = devm_platform_ioremap_resource(op, 0);
        if (IS_ERR(d->base))
                return PTR_ERR(d->base);
 
@@ -894,7 +889,6 @@ static int zx_dma_remove(struct platform_device *op)
                list_del(&c->vc.chan.device_node);
        }
        clk_disable_unprepare(d->clk);
-       dmam_pool_destroy(d->pool);
 
        return 0;
 }
diff --git a/include/dt-bindings/dma/x1000-dma.h b/include/dt-bindings/dma/x1000-dma.h
new file mode 100644 (file)
index 0000000..401e165
--- /dev/null
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1000 DMA bindings.
+ *
+ * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1000_DMA_H__
+#define __DT_BINDINGS_DMA_X1000_DMA_H__
+
+/*
+ * Request type numbers for the X1000 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1000_DMA_DMIC_RX      0x5
+#define X1000_DMA_I2S0_TX      0x6
+#define X1000_DMA_I2S0_RX      0x7
+#define X1000_DMA_AUTO         0x8
+#define X1000_DMA_UART2_TX     0x10
+#define X1000_DMA_UART2_RX     0x11
+#define X1000_DMA_UART1_TX     0x12
+#define X1000_DMA_UART1_RX     0x13
+#define X1000_DMA_UART0_TX     0x14
+#define X1000_DMA_UART0_RX     0x15
+#define X1000_DMA_SSI0_TX      0x16
+#define X1000_DMA_SSI0_RX      0x17
+#define X1000_DMA_MSC0_TX      0x1a
+#define X1000_DMA_MSC0_RX      0x1b
+#define X1000_DMA_MSC1_TX      0x1c
+#define X1000_DMA_MSC1_RX      0x1d
+#define X1000_DMA_PCM0_TX      0x20
+#define X1000_DMA_PCM0_RX      0x21
+#define X1000_DMA_SMB0_TX      0x24
+#define X1000_DMA_SMB0_RX      0x25
+#define X1000_DMA_SMB1_TX      0x26
+#define X1000_DMA_SMB1_RX      0x27
+#define X1000_DMA_SMB2_TX      0x28
+#define X1000_DMA_SMB2_RX      0x29
+
+#endif /* __DT_BINDINGS_DMA_X1000_DMA_H__ */
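
These request numbers are consumed from client device nodes via the PDMA controller's DMA specifier. Assuming the two-cell form used by the Ingenic PDMA binding (request type followed by a channel word, where 0xffffffff means "any available channel"), a minimal hypothetical client fragment would look like the sketch below; the &pdma label, unit address and compatible string are illustrative, not taken from this patch.

	#include <dt-bindings/dma/x1000-dma.h>

	uart0: serial@10030000 {
		compatible = "ingenic,x1000-uart";
		reg = <0x10030000 0x100>;
		/* request type from this header, 0xffffffff = any channel */
		dmas = <&pdma X1000_DMA_UART0_RX 0xffffffff>,
		       <&pdma X1000_DMA_UART0_TX 0xffffffff>;
		dma-names = "rx", "tx";
	};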