// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x00
#define JZ_DMA_REG_DIRQP	0x04
#define JZ_DMA_REG_DDR		0x08
#define JZ_DMA_REG_DDRS		0x0c
#define JZ_DMA_REG_DCKE		0x10
#define JZ_DMA_REG_DCKES	0x14
#define JZ_DMA_REG_DCKEC	0x18
#define JZ_DMA_REG_DMACP	0x1c
#define JZ_DMA_REG_DSIRQP	0x20
#define JZ_DMA_REG_DSIRQM	0x24
#define JZ_DMA_REG_DCIRQP	0x28
#define JZ_DMA_REG_DCIRQM	0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
#define JZ_DMA_REG_DSA		0x00
#define JZ_DMA_REG_DTA		0x04
#define JZ_DMA_REG_DTC		0x08
#define JZ_DMA_REG_DRT		0x0c
#define JZ_DMA_REG_DCS		0x10
#define JZ_DMA_REG_DCM		0x14
#define JZ_DMA_REG_DDA		0x18
#define JZ_DMA_REG_DSD		0x1c

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FAIC	BIT(27)
#define JZ_DMA_DMAC_FMSC	BIT(31)

#define JZ_DMA_DRT_AUTO		0x8

#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define JZ4780_DMA_CTRL_OFFSET	0x1000

/* macros for use with jz4780_dma_soc_data.flags */
#define JZ_SOC_DATA_ALLOW_LEGACY_DT	BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA	BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM		BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC	BIT(3)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
};

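/*
 * Note: each hardware descriptor is 16 bytes (four 32-bit words), so the
 * next-descriptor offset stored in the upper 8 bits of DTC, being expressed
 * in 16-byte units (offset >> 4), is simply the index of the next
 * descriptor: ((i + 1) * 16) >> 4 == i + 1. See jz4780_dma_prep_slave_sg().
 */
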
/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
#define JZ_DMA_MAX_DESC		\
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

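/*
 * For example, with 4 KiB pages this allows 4096 / 16 = 256 hardware
 * descriptors per pool block, which bounds the sg_len and period counts
 * accepted by the prep callbacks below (assuming PAGE_SIZE is 4 KiB;
 * larger pages raise the limit accordingly).
 */
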
struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config	config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	uint32_t transfer_type;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, uint32_t val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

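/*
 * Note: DCKES and DCKEC appear to be set/clear companions of DCKE, so a
 * write only affects the bits that are set. SoCs flagged with
 * JZ_SOC_DATA_NO_DCKES_DCKEC lack the set/clear pair, hence enable writes
 * DCKE directly and disable becomes a no-op there, leaving the channel
 * clock ungated (a presumed hardware limitation, not a driver choice).
 */
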
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, uint32_t *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	int ord = ffs(val) - 1;

	/*
	 * 8 byte transfer sizes unsupported so fall back on 4. If it's larger
	 * than the maximum, just limit it. It is perfectly safe to fall back
	 * in this way since we won't exceed the maximum burst size supported
	 * by the device, the only effect is reduced efficiency. This is better
	 * than refusing to perform the request at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}

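/*
 * Worked example of the selection above (illustrative): for addr = 0x1004,
 * len = 0x100 and width * maxburst = 16, val = 0x1114, so ffs(val) - 1 = 2
 * and the transfer unit is 4 bytes - the largest power of two that divides
 * the address, the length and the burst size alike.
 */
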
static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		/* These enum values match the hardware port width encoding. */
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			/* Free the descriptor we just built, not the one in flight. */
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

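/*
 * Note: although TIE is set on every descriptor above, the controller only
 * raises an interrupt once it reaches the end of the terminated list, i.e.
 * the final descriptor without JZ_DMA_DCM_LINK (see the explanation in
 * jz4780_dma_begin() below).
 */
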
static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			/* Free the descriptor we just built, not the one in flight. */
			jz4780_dma_desc_free(&desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	uint32_t tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	jzchan->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

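/*
 * Note: memory-to-memory copies have no peripheral request line, which is
 * presumably why JZ_DMA_DRT_AUTO is selected above: the channel then paces
 * itself instead of waiting on a hardware request. Slave transfers instead
 * use the request type handed in through the devicetree, see
 * jz4780_of_dma_xlate().
 */
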
static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we workaround this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when DTCn reg contains the new transfer count. Setting
	 * it explicitly ensures residue is computed correctly at all times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

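/*
 * In the degraded cyclic case above, each period completion re-enters
 * jz4780_dma_begin() from jz4780_dma_chan_irq(), and the "existing
 * transfer" branch advances curr_hwdesc around the ring by hand since the
 * hardware LINK bits were cleared.
 */
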
static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);

	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration, it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

	for (i = next_sg; i < desc->count; i++)
		count += desc->desc[i].dtc & GENMASK(23, 0);

	if (next_sg != 0)
		count += jz4780_dma_chn_readl(jzdma, jzchan->id,
					      JZ_DMA_REG_DTC);

	return count << jzchan->transfer_shift;
}

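/*
 * The residue is accumulated in transfer-size blocks (DTC counts blocks,
 * not bytes), hence the final shift: e.g. with 4-byte transfers
 * (transfer_shift == 2), a remaining count of 0x30 blocks is 0xc0 bytes.
 */
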
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	unsigned long residue = 0;

	status = dma_cookie_status(chan, cookie, txstate);
	if ((status == DMA_COMPLETE) || (txstate == NULL))
		return status;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	vdesc = vchan_find_desc(&jzchan->vchan, cookie);
	if (vdesc) {
		/* On the issued list, so hasn't been processed yet */
		residue = jz4780_dma_desc_residue(jzchan,
					to_jz4780_dma_desc(vdesc), 0);
	} else if (jzchan->desc && cookie == jzchan->desc->vdesc.tx.cookie) {
		residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
					jzchan->curr_hwdesc + 1);
	}
	dma_set_residue(txstate, residue);

	if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
	    && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
		status = DMA_ERROR;

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	return status;
}

static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
	struct jz4780_dma_chan *jzchan)
{
	uint32_t dcs;
	bool ret = true;

	spin_lock(&jzchan->vchan.lock);

	dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	if (dcs & JZ_DMA_DCS_AR) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "address error (DCS=0x%x)\n", dcs);
	}

	if (dcs & JZ_DMA_DCS_HLT) {
		dev_warn(&jzchan->vchan.chan.dev->device,
			 "channel halt (DCS=0x%x)\n", dcs);
	}

	if (jzchan->desc) {
		jzchan->desc->status = dcs;

		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
			if (jzchan->desc->type == DMA_CYCLIC) {
				vchan_cyclic_callback(&jzchan->desc->vdesc);

				jz4780_dma_begin(jzchan);
			} else if (dcs & JZ_DMA_DCS_TT) {
				vchan_cookie_complete(&jzchan->desc->vdesc);
				jzchan->desc = NULL;

				jz4780_dma_begin(jzchan);
			} else {
				/* False positive - continue the transfer */
				ret = false;
				jz4780_dma_chn_writel(jzdma, jzchan->id,
						      JZ_DMA_REG_DCS,
						      JZ_DMA_DCS_CTE);
			}
		}
	} else {
		dev_err(&jzchan->vchan.chan.dev->device,
			"channel IRQ with no active transfer\n");
	}

	spin_unlock(&jzchan->vchan.lock);

	return ret;
}

static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
	struct jz4780_dma_dev *jzdma = data;
	unsigned int nb_channels = jzdma->soc_data->nb_channels;
	unsigned long pending;
	uint32_t dmac;
	int i;

	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);

	for_each_set_bit(i, &pending, nb_channels) {
		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
			pending &= ~BIT(i);
	}

	/* Clear halt and address error status of all channels. */
	dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
	dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);

	/* Clear interrupt pending status. */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);

	return IRQ_HANDLED;
}

static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
					    chan->device->dev,
					    JZ_DMA_DESC_BLOCK_SIZE,
					    PAGE_SIZE, 0);
	if (!jzchan->desc_pool) {
		dev_err(&chan->dev->device,
			"failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	vchan_free_chan_resources(&jzchan->vchan);
	dma_pool_destroy(jzchan->desc_pool);
	jzchan->desc_pool = NULL;
}

static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}

static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type = data.transfer_type;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	}

	return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
				     ofdma->of_node);
}

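/*
 * Illustrative consumer binding (the request-type value is made up; real
 * ones come from the SoC documentation):
 *
 *	dmas = <&dma 0x14 0xffffffff>;
 *
 * The first cell selects the hardware request type, the second a fixed
 * reserved channel; 0xffffffff (-1) lets the core pick any unreserved
 * channel through jz4780_dma_filter_fn().
 */
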
static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->chn_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", ret);
		return ret;
	}

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		return ret;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		ret = PTR_ERR(jzdma->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(jzdma->clk);

	/* Property is optional, if it doesn't exist the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);
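
	/*
	 * Illustrative example: ingenic,reserved-channels = <0x3>; would keep
	 * channels 0 and 1 out of the general-purpose pool, so they can only
	 * be claimed by clients that name them explicitly in the second cell
	 * of their dmas property (see jz4780_of_dma_xlate()).
	 */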

	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_disable_clk;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_disable_clk;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);

err_free_irq:
	free_irq(jzdma->irq, jzdma);
	return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);

	return 0;
}

static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table = of_match_ptr(jz4780_dma_dt_match),
	},
};

static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");