/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */
#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config	*_sconfig = &_dwc->dma_sconfig;	\
		bool _is_slave = is_slave_direction(_dwc->direction);	\
		u8 _smsize = _is_slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
		u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?		\
			_dwc->dws.p_master : _dwc->dws.m_master;	\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})
/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;

	return desc;
}
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}
/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}
static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}
/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}
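/*
 * Note (illustrative, controller specific): for the original
 * DW_ahb_dmac the CTL_HI.BLOCK_TS field counts items of the source
 * transfer width, so the block2bytes() callback used above amounts
 * to roughly block_ts << src_tr_width, where src_tr_width is the
 * CTL_LO bits 6:4 extracted by "ctllo >> 4 & 7".
 */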
static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}
static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/
static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo, ctlhi;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
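/*
 * Illustrative client-side sketch (not part of this driver): the
 * routine above is reached through the generic dmaengine API, e.g.:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;			// e.g. ran out of descriptors
 *
 *	tx->callback = my_memcpy_done;		// hypothetical callback
 *	cookie = dmaengine_submit(tx);		// lands in dwc_tx_submit()
 *	dma_async_issue_pending(chan);		// kicks dwc_dostart()
 */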
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo, ctlhi;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen;
			u32		mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, dlen;
			u32		mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(data_width | mem | dlen);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}
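/*
 * Illustrative client-side sketch (not part of this driver): a
 * peripheral driver maps its buffer and hands a scatterlist to
 * dmaengine_prep_slave_sg(), which lands in the routine above:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	nents = dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
 *	tx = dmaengine_prep_slave_sg(chan, &sg, nents, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *
 * dmaengine_slave_config() must have been called beforehand so that
 * dwc_config() below has filled dwc->dma_sconfig.
 */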
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
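/*
 * Illustrative sketch (not part of this driver): a client can grab a
 * channel on a specific DW DMAC instance by passing a struct
 * dw_dma_slave through dma_request_channel():
 *
 *	struct dw_dma_slave slave = {
 *		.dma_dev = dmac_dev,	// struct device of the DMAC (assumed)
 *		.src_id = 0,		// handshake interface numbers
 *		.dst_id = 1,
 *		.m_master = 0,		// memory and peripheral masters
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 *
 * dw_dma_filter() copies the possibly temporary slave data into the
 * channel before the channel is handed out.
 */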
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);

	return 0;
}
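/*
 * Illustrative sketch (not part of this driver): clients reach
 * dwc_config() via dmaengine_slave_config(), e.g. for a
 * memory-to-peripheral channel:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_dma_addr,	// hypothetical FIFO bus address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * The maxburst values are translated into the hardware MSIZE encoding
 * by the encode_maxburst() callback used above.
 */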
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}
static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}
static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}
static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}
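/*
 * Illustrative sketch (not part of this driver): a client polls
 * transfer progress through dmaengine_tx_status():
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_debug("%u bytes left\n", state.residue);
 */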
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/
void do_dw_dma_off(struct dw_dma *dw)
{
	unsigned int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}
static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}
static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool			autocfg = false;
	unsigned int		dw_params;
	unsigned int		i;
	int			err;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	do_dw_dma_off(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}
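/*
 * Illustrative glue-driver sketch (assumptions, not part of this file):
 * the platform/PCI glue in this directory fills a struct dw_dma_chip
 * and calls into this core, roughly:
 *
 *	chip->dev = &pdev->dev;
 *	chip->id = pdev->id;
 *	chip->irq = platform_get_irq(pdev, 0);
 *	chip->regs = devm_platform_ioremap_resource(pdev, 0);
 *	chip->pdata = dev_get_platdata(&pdev->dev);	// NULL => autocfg path
 *
 *	err = dw_dma_probe(chip);	// wrapper that ends up in do_dma_probe()
 */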
int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}
int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	do_dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	do_dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");