/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"
/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */
40 /* The set of bus widths supported by the DMA controller */
41 #define DW_DMA_BUSWIDTHS \
42 BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
43 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
44 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
45 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
47 /*----------------------------------------------------------------------*/
static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}
static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}
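/*
 * Note: tx_submit above only puts the descriptor on dwc->queue; nothing is
 * written to the hardware at this point.  The transfer is started later,
 * when dwc_issue_pending() (or completion of a previous descriptor) calls
 * dwc_dostart_first_queued() to move it onto the active list.
 */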
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}
static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
		return;

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
136 /*----------------------------------------------------------------------*/
static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}
156 /*----------------------------------------------------------------------*/
/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}
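/*
 * In this software (soft LLP) mode each block raises a transfer-complete
 * interrupt; the tasklet then runs dwc_scan_descriptors(), which advances
 * tx_node_active and calls dwc_do_single_block() again until the
 * descriptor's tx_list is exhausted.
 */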
181 /* Called with dwc->lock held and bh disabled */
182 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
184 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
185 u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
186 unsigned long was_soft_llp;
188 /* ASSERT: channel is idle */
189 if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}
	was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
	if (was_soft_llp) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start new LLP transfer inside ongoing one\n");
		return;
	}
210 first->residue = first->total_len;
211 dwc->tx_node_active = &first->tx_list;
213 /* Submit first block */
214 dwc_do_single_block(dwc, first);
221 channel_writel(dwc, LLP, first->txd.phys | lms);
222 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
223 channel_writel(dwc, CTL_HI, 0);
224 channel_set_bit(dw, CH_EN, dwc->mask);
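/*
 * In hardware LLP mode, set up above, the controller walks the chain of
 * hardware descriptors on its own: LLP points at the first lli and each
 * lli's llp field points at the next one, so no per-block software
 * intervention is required.
 */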
227 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
229 struct dw_desc *desc;
	if (list_empty(&dwc->queue))
		return;

234 list_move(dwc->queue.next, &dwc->active_list);
235 desc = dwc_first_active(dwc);
236 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
237 dwc_dostart(dwc, desc);
240 /*----------------------------------------------------------------------*/
243 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
244 bool callback_required)
246 struct dma_async_tx_descriptor *txd = &desc->txd;
247 struct dw_desc *child;
249 struct dmaengine_desc_callback cb;
251 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
253 spin_lock_irqsave(&dwc->lock, flags);
254 dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));
261 list_for_each_entry(child, &desc->tx_list, desc_node)
262 async_tx_ack(&child->txd);
263 async_tx_ack(&desc->txd);
264 dwc_desc_put(dwc, desc);
265 spin_unlock_irqrestore(&dwc->lock, flags);
267 dmaengine_desc_callback_invoke(&cb, NULL);
270 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
272 struct dw_desc *desc, *_desc;
276 spin_lock_irqsave(&dwc->lock, flags);
277 if (dma_readl(dw, CH_EN) & dwc->mask) {
278 dev_err(chan2dev(&dwc->chan),
279 "BUG: XFER bit set, but channel not idle!\n");
281 /* Try to continue after resetting the channel... */
282 dwc_chan_disable(dw, dwc);
286 * Submit queued descriptors ASAP, i.e. before we go through
287 * the completed ones.
289 list_splice_init(&dwc->active_list, &list);
290 dwc_dostart_first_queued(dwc);
292 spin_unlock_irqrestore(&dwc->lock, flags);
294 list_for_each_entry_safe(desc, _desc, &list, desc_node)
295 dwc_descriptor_complete(dwc, desc, true);
298 /* Returns how many bytes were already received from source */
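/*
 * CTL_HI carries the BLOCK_TS field, which the hardware updates with the
 * number of items moved for the current block, and bits 6:4 of CTL_LO hold
 * SRC_TR_WIDTH; block2bytes() uses that width to turn the item count into
 * bytes.
 */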
299 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
301 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
302 u32 ctlhi = channel_readl(dwc, CTL_HI);
303 u32 ctllo = channel_readl(dwc, CTL_LO);
305 return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
308 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
311 struct dw_desc *desc, *_desc;
312 struct dw_desc *child;
316 spin_lock_irqsave(&dwc->lock, flags);
317 llp = channel_readl(dwc, LLP);
318 status_xfer = dma_readl(dw, RAW.XFER);
320 if (status_xfer & dwc->mask) {
321 /* Everything we've submitted is done */
322 dma_writel(dw, CLEAR.XFER, dwc->mask);
324 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
325 struct list_head *head, *active = dwc->tx_node_active;
328 * We are inside first active descriptor.
329 * Otherwise something is really wrong.
331 desc = dwc_first_active(dwc);
333 head = &desc->tx_list;
334 if (active != head) {
			/* Update residue to reflect last sent descriptor */
			if (active == head->next)
				desc->residue -= desc->len;
			else
				desc->residue -= to_dw_desc(active->prev)->len;
341 child = to_dw_desc(active);
343 /* Submit next block */
344 dwc_do_single_block(dwc, child);
346 spin_unlock_irqrestore(&dwc->lock, flags);
350 /* We are done here */
351 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
354 spin_unlock_irqrestore(&dwc->lock, flags);
356 dwc_complete_all(dw, dwc);
360 if (list_empty(&dwc->active_list)) {
361 spin_unlock_irqrestore(&dwc->lock, flags);
365 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
366 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
367 spin_unlock_irqrestore(&dwc->lock, flags);
371 dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
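	/*
	 * Walk the active list looking for the descriptor the hardware is
	 * currently working on (matched against LLP): descriptors before it
	 * are retired, the in-progress one gets dwc_get_sent() subtracted
	 * from its residue, and everything after it is left untouched.
	 */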
373 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
374 /* Initial residue value */
375 desc->residue = desc->total_len;
377 /* Check first descriptors addr */
378 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
379 spin_unlock_irqrestore(&dwc->lock, flags);
383 /* Check first descriptors llp */
384 if (lli_read(desc, llp) == llp) {
385 /* This one is currently in progress */
386 desc->residue -= dwc_get_sent(dwc);
387 spin_unlock_irqrestore(&dwc->lock, flags);
391 desc->residue -= desc->len;
392 list_for_each_entry(child, &desc->tx_list, desc_node) {
393 if (lli_read(child, llp) == llp) {
394 /* Currently in progress */
395 desc->residue -= dwc_get_sent(dwc);
396 spin_unlock_irqrestore(&dwc->lock, flags);
399 desc->residue -= child->len;
403 * No descriptors so far seem to be in progress, i.e.
404 * this one must be done.
406 spin_unlock_irqrestore(&dwc->lock, flags);
407 dwc_descriptor_complete(dwc, desc, true);
408 spin_lock_irqsave(&dwc->lock, flags);
411 dev_err(chan2dev(&dwc->chan),
412 "BUG: All descriptors done, but channel not idle!\n");
414 /* Try to continue after resetting the channel... */
415 dwc_chan_disable(dw, dwc);
417 dwc_dostart_first_queued(dwc);
418 spin_unlock_irqrestore(&dwc->lock, flags);
static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar), lli_read(desc, dar), lli_read(desc, llp),
		 lli_read(desc, ctlhi), lli_read(desc, ctllo));
}
431 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
433 struct dw_desc *bad_desc;
434 struct dw_desc *child;
437 dwc_scan_descriptors(dw, dwc);
439 spin_lock_irqsave(&dwc->lock, flags);
442 * The descriptor currently at the head of the active list is
443 * borked. Since we don't have any way to report errors, we'll
444 * just have to scream loudly and try to carry on.
446 bad_desc = dwc_first_active(dwc);
447 list_del_init(&bad_desc->desc_node);
448 list_move(dwc->queue.next, dwc->active_list.prev);
450 /* Clear the error flag and try to restart the controller */
451 dma_writel(dw, CLEAR.ERROR, dwc->mask);
452 if (!list_empty(&dwc->active_list))
453 dwc_dostart(dwc, dwc_first_active(dwc));
456 * WARN may seem harsh, but since this only happens
457 * when someone submits a bad physical address in a
458 * descriptor, we should consider ourselves lucky that the
459 * controller flagged an error instead of scribbling over
460 * random memory locations.
462 dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
463 " cookie: %d\n", bad_desc->txd.cookie);
464 dwc_dump_lli(dwc, bad_desc);
465 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
466 dwc_dump_lli(dwc, child);
468 spin_unlock_irqrestore(&dwc->lock, flags);
470 /* Pretend the descriptor completed successfully */
471 dwc_descriptor_complete(dwc, bad_desc, true);
474 static void dw_dma_tasklet(unsigned long data)
476 struct dw_dma *dw = (struct dw_dma *)data;
477 struct dw_dma_chan *dwc;
482 status_xfer = dma_readl(dw, RAW.XFER);
483 status_err = dma_readl(dw, RAW.ERROR);
485 dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];

489 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
490 dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
491 else if (status_err & (1 << i))
492 dwc_handle_error(dw, dwc);
493 else if (status_xfer & (1 << i))
494 dwc_scan_descriptors(dw, dwc);
497 /* Re-enable interrupts */
498 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
499 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
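/*
 * The interrupt handler below only masks the channel interrupts and kicks
 * the tasklet; dw_dma_tasklet() above does the actual descriptor
 * bookkeeping and re-enables the masks once it is done.
 */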
502 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
504 struct dw_dma *dw = dev_id;
507 /* Check if we have any interrupt from the DMAC which is not in use */
511 status = dma_readl(dw, STATUS_INT);
512 dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
514 /* Check if we have any interrupt from the DMAC */
	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
522 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
523 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
524 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
526 status = dma_readl(dw, STATUS_INT);
529 "BUG: Unexpected interrupts pending: 0x%x\n",
533 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
534 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
535 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
536 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
537 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
540 tasklet_schedule(&dw->tasklet);
545 /*----------------------------------------------------------------------*/
547 static struct dma_async_tx_descriptor *
548 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
549 size_t len, unsigned long flags)
551 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
552 struct dw_dma *dw = to_dw_dma(chan->device);
553 struct dw_desc *desc;
554 struct dw_desc *first;
555 struct dw_desc *prev;
558 u8 m_master = dwc->dws.m_master;
559 unsigned int src_width;
560 unsigned int dst_width;
561 unsigned int data_width = dw->pdata->data_width[m_master];
563 u8 lms = DWC_LLP_LMS(m_master);
565 dev_vdbg(chan2dev(chan),
566 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
567 &dest, &src, len, flags);
569 if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}
574 dwc->direction = DMA_MEM_TO_MEM;
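	/*
	 * The next line picks the transfer width: __ffs() of the OR returns
	 * the lowest set bit, i.e. the largest power-of-two size that divides
	 * source, destination and length, capped by the master data bus
	 * width.
	 */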
576 src_width = dst_width = __ffs(data_width | src | dest | len);
578 ctllo = dw->prepare_ctllo(dwc)
579 | DWC_CTLL_DST_WIDTH(dst_width)
580 | DWC_CTLL_SRC_WIDTH(src_width)
586 for (offset = 0; offset < len; offset += xfer_count) {
587 desc = dwc_desc_get(dwc);
591 ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
593 lli_write(desc, sar, src + offset);
594 lli_write(desc, dar, dest + offset);
595 lli_write(desc, ctllo, ctllo);
596 lli_write(desc, ctlhi, ctlhi);
597 desc->len = xfer_count;
602 lli_write(prev, llp, desc->txd.phys | lms);
603 list_add_tail(&desc->desc_node, &first->tx_list);
608 if (flags & DMA_PREP_INTERRUPT)
609 /* Trigger interrupt after last block */
610 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
613 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
614 first->txd.flags = flags;
615 first->total_len = len;
620 dwc_desc_put(dwc, first);
624 static struct dma_async_tx_descriptor *
625 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
626 unsigned int sg_len, enum dma_transfer_direction direction,
627 unsigned long flags, void *context)
629 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
630 struct dw_dma *dw = to_dw_dma(chan->device);
631 struct dma_slave_config *sconfig = &dwc->dma_sconfig;
632 struct dw_desc *prev;
633 struct dw_desc *first;
635 u8 m_master = dwc->dws.m_master;
636 u8 lms = DWC_LLP_LMS(m_master);
638 unsigned int reg_width;
639 unsigned int mem_width;
640 unsigned int data_width = dw->pdata->data_width[m_master];
642 struct scatterlist *sg;
643 size_t total_len = 0;
645 dev_vdbg(chan2dev(chan), "%s\n", __func__);
	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

650 dwc->direction = direction;
656 reg_width = __ffs(sconfig->dst_addr_width);
657 reg = sconfig->dst_addr;
658 ctllo = dw->prepare_ctllo(dwc)
659 | DWC_CTLL_DST_WIDTH(reg_width)
663 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
664 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
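		/*
		 * device_fc selects who acts as flow controller for the
		 * transfer: the peripheral (DW_DMA_FC_P_M2P) when it can
		 * signal the end of the block itself, otherwise the DMAC
		 * (DW_DMA_FC_D_M2P).
		 */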
666 for_each_sg(sgl, sg, sg_len, i) {
667 struct dw_desc *desc;
671 mem = sg_dma_address(sg);
672 len = sg_dma_len(sg);
674 mem_width = __ffs(data_width | mem | len);
676 slave_sg_todev_fill_desc:
677 desc = dwc_desc_get(dwc);
681 ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
683 lli_write(desc, sar, mem);
684 lli_write(desc, dar, reg);
685 lli_write(desc, ctlhi, ctlhi);
686 lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
692 lli_write(prev, llp, desc->txd.phys | lms);
693 list_add_tail(&desc->desc_node, &first->tx_list);
702 goto slave_sg_todev_fill_desc;
706 reg_width = __ffs(sconfig->src_addr_width);
707 reg = sconfig->src_addr;
708 ctllo = dw->prepare_ctllo(dwc)
709 | DWC_CTLL_SRC_WIDTH(reg_width)
713 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
714 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
716 for_each_sg(sgl, sg, sg_len, i) {
717 struct dw_desc *desc;
721 mem = sg_dma_address(sg);
722 len = sg_dma_len(sg);
724 slave_sg_fromdev_fill_desc:
725 desc = dwc_desc_get(dwc);
729 ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
731 lli_write(desc, sar, reg);
732 lli_write(desc, dar, mem);
733 lli_write(desc, ctlhi, ctlhi);
734 mem_width = __ffs(data_width | mem | dlen);
735 lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
741 lli_write(prev, llp, desc->txd.phys | lms);
742 list_add_tail(&desc->desc_node, &first->tx_list);
751 goto slave_sg_fromdev_fill_desc;
758 if (flags & DMA_PREP_INTERRUPT)
759 /* Trigger interrupt after last block */
760 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
763 lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
764 first->total_len = total_len;
769 dev_err(chan2dev(chan),
770 "not enough descriptors available. Direction %d\n", direction);
771 dwc_desc_put(dwc, first);
bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
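/*
 * A minimal sketch of how a client is expected to use the filter above with
 * dma_request_channel(); the dw_dma_slave values (request line ids, masters)
 * are hypothetical and depend on the platform wiring:
 *
 *	struct dw_dma_slave dws = {
 *		.dma_dev = dmac_dev,	// struct device of this DMA controller
 *		.src_id = 0,		// handshake interface numbers
 *		.dst_id = 1,
 *		.m_master = 0,
 *		.p_master = 1,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */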
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);

	return 0;
}
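/*
 * A minimal sketch of the dma_slave_config a peripheral driver would pass in
 * through dmaengine_slave_config() before preparing transfers; the FIFO
 * address, width and burst size below are hypothetical:
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_dma_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */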
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}
816 static int dwc_pause(struct dma_chan *chan)
818 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
821 spin_lock_irqsave(&dwc->lock, flags);
822 dwc_chan_pause(dwc, false);
823 spin_unlock_irqrestore(&dwc->lock, flags);
828 static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
830 struct dw_dma *dw = to_dw_dma(dwc->chan.device);
832 dw->resume_chan(dwc, drain);
834 clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
837 static int dwc_resume(struct dma_chan *chan)
839 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
842 spin_lock_irqsave(&dwc->lock, flags);
844 if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
845 dwc_chan_resume(dwc, false);
847 spin_unlock_irqrestore(&dwc->lock, flags);
852 static int dwc_terminate_all(struct dma_chan *chan)
854 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
855 struct dw_dma *dw = to_dw_dma(chan->device);
856 struct dw_desc *desc, *_desc;
860 spin_lock_irqsave(&dwc->lock, flags);
862 clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
864 dwc_chan_pause(dwc, true);
866 dwc_chan_disable(dw, dwc);
868 dwc_chan_resume(dwc, true);
870 /* active_list entries will end up before queued entries */
871 list_splice_init(&dwc->queue, &list);
872 list_splice_init(&dwc->active_list, &list);
874 spin_unlock_irqrestore(&dwc->lock, flags);
876 /* Flush all pending and queued descriptors */
877 list_for_each_entry_safe(desc, _desc, &list, desc_node)
878 dwc_descriptor_complete(dwc, desc, false);
static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;
	return NULL;
}
894 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
896 struct dw_desc *desc;
900 spin_lock_irqsave(&dwc->lock, flags);
902 desc = dwc_find_desc(dwc, cookie);
904 if (desc == dwc_first_active(dwc)) {
905 residue = desc->residue;
906 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
907 residue -= dwc_get_sent(dwc);
	} else {
		residue = desc->total_len;
	}
915 spin_unlock_irqrestore(&dwc->lock, flags);
919 static enum dma_status
920 dwc_tx_status(struct dma_chan *chan,
922 struct dma_tx_state *txstate)
924 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;
	return ret;
}
945 static void dwc_issue_pending(struct dma_chan *chan)
947 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
950 spin_lock_irqsave(&dwc->lock, flags);
951 if (list_empty(&dwc->active_list))
952 dwc_dostart_first_queued(dwc);
953 spin_unlock_irqrestore(&dwc->lock, flags);
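/*
 * Typical client sequence, shown only as a hedged sketch of the generic
 * dmaengine API this driver plugs into (chan/sgl/callback names are
 * illustrative):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	desc->callback = my_done_callback;
 *	cookie = dmaengine_submit(desc);	// ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up here
 */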
956 /*----------------------------------------------------------------------*/
958 void do_dw_dma_off(struct dw_dma *dw)
962 dma_writel(dw, CFG, 0);
964 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
965 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
966 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
967 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
968 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

973 for (i = 0; i < dw->dma.chancnt; i++)
974 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
977 void do_dw_dma_on(struct dw_dma *dw)
979 dma_writel(dw, CFG, DW_CFG_DMA_EN);
982 static int dwc_alloc_chan_resources(struct dma_chan *chan)
984 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
985 struct dw_dma *dw = to_dw_dma(chan->device);
987 dev_vdbg(chan2dev(chan), "%s\n", __func__);
989 /* ASSERT: channel is idle */
990 if (dma_readl(dw, CH_EN) & dwc->mask) {
991 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
995 dma_cookie_init(chan);
998 * NOTE: some controllers may have additional features that we
999 * need to initialize here, like "scatter-gather" (which
1000 * doesn't mean what you think it means), and status writeback.
1004 * We need controller-specific data to set up slave transfers.
1006 if (chan->private && !dw_dma_filter(chan, chan->private)) {
1007 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1011 /* Enable controller here if needed */
1014 dw->in_use |= dwc->mask;
1019 static void dwc_free_chan_resources(struct dma_chan *chan)
1021 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1022 struct dw_dma *dw = to_dw_dma(chan->device);
1023 unsigned long flags;
1026 dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1027 dwc->descs_allocated);
1029 /* ASSERT: channel is idle */
1030 BUG_ON(!list_empty(&dwc->active_list));
1031 BUG_ON(!list_empty(&dwc->queue));
1032 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1034 spin_lock_irqsave(&dwc->lock, flags);
1036 /* Clear custom channel configuration */
1037 memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1039 clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1041 /* Disable interrupts */
1042 channel_clear_bit(dw, MASK.XFER, dwc->mask);
1043 channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1044 channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1046 spin_unlock_irqrestore(&dwc->lock, flags);
1048 /* Disable controller in case it was a last user */
1049 dw->in_use &= ~dwc->mask;
1053 dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1056 int do_dma_probe(struct dw_dma_chip *chip)
1058 struct dw_dma *dw = chip->dw;
1059 struct dw_dma_platform_data *pdata;
1060 bool autocfg = false;
1061 unsigned int dw_params;
1065 dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1069 dw->regs = chip->regs;
1071 pm_runtime_get_sync(chip->dev);
1074 dw_params = dma_readl(dw, DW_PARAMS);
1075 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1077 autocfg = dw_params >> DW_PARAMS_EN & 1;
1083 /* Reassign the platform data pointer */
1086 /* Get hardware configuration parameters */
1087 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1088 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1089 for (i = 0; i < pdata->nr_masters; i++) {
1090 pdata->data_width[i] =
1091 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1093 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1095 /* Fill platform data with the default values */
1096 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1097 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1098 } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1102 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1104 /* Reassign the platform data pointer */
	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
1115 /* Calculate all channel mask before DMA setup */
1116 dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1118 /* Force dma off, just in case */
1121 /* Device and instance ID for IRQ and DMA pool */
1122 dw->set_device_name(dw, chip->id);
1124 /* Create a pool of consistent memory blocks for hardware descriptors */
1125 dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
1126 sizeof(struct dw_desc), 4, 0);
1127 if (!dw->desc_pool) {
1128 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1133 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
1140 INIT_LIST_HEAD(&dw->dma.channels);
1141 for (i = 0; i < pdata->nr_channels; i++) {
1142 struct dw_dma_chan *dwc = &dw->chan[i];
1144 dwc->chan.device = &dw->dma;
1145 dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;
1158 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1159 spin_lock_init(&dwc->lock);
1162 INIT_LIST_HEAD(&dwc->active_list);
1163 INIT_LIST_HEAD(&dwc->queue);
1165 channel_clear_bit(dw, CH_EN, dwc->mask);
1167 dwc->direction = DMA_TRANS_NONE;
		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
				dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
		}
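		/*
		 * Example of the autocfg decoding above: an encoded value of
		 * 0x00 gives (4 << 0) - 1 = 3 items and 0x0a gives
		 * (4 << 0xa) - 1 = 4095 items, matching the range quoted in
		 * the comment.
		 */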
1193 /* Clear all interrupts on all channels. */
1194 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1195 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1196 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1197 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1198 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1200 /* Set capabilities */
1201 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1202 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1203 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1205 dw->dma.dev = chip->dev;
1206 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1207 dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1209 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1210 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1212 dw->dma.device_config = dwc_config;
1213 dw->dma.device_pause = dwc_pause;
1214 dw->dma.device_resume = dwc_resume;
1215 dw->dma.device_terminate_all = dwc_terminate_all;
1217 dw->dma.device_tx_status = dwc_tx_status;
1218 dw->dma.device_issue_pending = dwc_issue_pending;
1220 /* DMA capabilities */
1221 dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1222 dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1223 dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1224 BIT(DMA_MEM_TO_MEM);
1225 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;
1231 dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1232 pdata->nr_channels);
1234 pm_runtime_put_sync_suspend(chip->dev);
1239 free_irq(chip->irq, dw);
1241 pm_runtime_put_sync_suspend(chip->dev);
1245 int do_dma_remove(struct dw_dma_chip *chip)
1247 struct dw_dma *dw = chip->dw;
1248 struct dw_dma_chan *dwc, *_dwc;
1250 pm_runtime_get_sync(chip->dev);
1253 dma_async_device_unregister(&dw->dma);
1255 free_irq(chip->irq, dw);
1256 tasklet_kill(&dw->tasklet);
	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
1260 list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}
1264 pm_runtime_put_sync_suspend(chip->dev);
int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	do_dw_dma_off(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	do_dw_dma_on(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);
1286 MODULE_LICENSE("GPL v2");
1287 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1288 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1289 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");