1 /*
2  * Core driver for the Synopsys DesignWare DMA Controller
3  *
4  * Copyright (C) 2007-2008 Atmel Corporation
5  * Copyright (C) 2010-2011 ST Microelectronics
6  * Copyright (C) 2013 Intel Corporation
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/bitops.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dmapool.h>
18 #include <linux/err.h>
19 #include <linux/init.h>
20 #include <linux/interrupt.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/pm_runtime.h>
26
27 #include "../dmaengine.h"
28 #include "internal.h"
29
30 /*
31  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
32  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
33  * of which use ARM any more).  See the "Databook" from Synopsys for
34  * information beyond what licensees probably provide.
35  *
36  * The driver has been tested with the Atmel AT32AP7000, which does not
37  * support descriptor writeback.
38  */
39
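/*
 * DWC_DEFAULT_CTLLO() builds the common CTL_LO bits for a channel: burst
 * sizes come from the slave config (already encoded by dwc_config()) for
 * peripheral transfers and default to DW_DMA_MSIZE_16 for memory-to-memory,
 * LLP updates are enabled on both source and destination so blocks can be
 * chained, and each side is routed to the peripheral AHB master when it is
 * the device end of the transfer and to the memory master otherwise.
 */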
40 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
41                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
42                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43                 bool _is_slave = is_slave_direction(_dwc->direction);   \
44                 u8 _smsize = _is_slave ? _sconfig->src_maxburst :       \
45                         DW_DMA_MSIZE_16;                        \
46                 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst :       \
47                         DW_DMA_MSIZE_16;                        \
48                 u8 _dms = (_dwc->direction == DMA_MEM_TO_DEV) ?         \
49                         _dwc->dws.p_master : _dwc->dws.m_master;        \
50                 u8 _sms = (_dwc->direction == DMA_DEV_TO_MEM) ?         \
51                         _dwc->dws.p_master : _dwc->dws.m_master;        \
52                                                                 \
53                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
54                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
55                  | DWC_CTLL_LLP_D_EN                            \
56                  | DWC_CTLL_LLP_S_EN                            \
57                  | DWC_CTLL_DMS(_dms)                           \
58                  | DWC_CTLL_SMS(_sms));                         \
59         })
60
61 /* The set of bus widths supported by the DMA controller */
62 #define DW_DMA_BUSWIDTHS                          \
63         BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)       | \
64         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)          | \
65         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)         | \
66         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
67
68 /*----------------------------------------------------------------------*/
69
70 static struct device *chan2dev(struct dma_chan *chan)
71 {
72         return &chan->dev->device;
73 }
74
75 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
76 {
77         return to_dw_desc(dwc->active_list.next);
78 }
79
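/*
 * tx_submit only queues work: dwc_tx_submit() assigns a cookie and appends
 * the descriptor to the software queue under the channel lock.  Nothing is
 * written to the hardware at this point; the transfer is started later,
 * either from dwc_issue_pending() when the channel is idle or from the
 * completion path via dwc_dostart_first_queued().
 */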
80 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
81 {
82         struct dw_desc          *desc = txd_to_dw_desc(tx);
83         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
84         dma_cookie_t            cookie;
85         unsigned long           flags;
86
87         spin_lock_irqsave(&dwc->lock, flags);
88         cookie = dma_cookie_assign(tx);
89
90         /*
91          * REVISIT: We should attempt to chain as many descriptors as
92          * possible, perhaps even appending to those already submitted
93          * for DMA. But this is hard to do in a race-free manner.
94          */
95
96         list_add_tail(&desc->desc_node, &dwc->queue);
97         spin_unlock_irqrestore(&dwc->lock, flags);
98         dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
99                  __func__, desc->txd.cookie);
100
101         return cookie;
102 }
103
104 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
105 {
106         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
107         struct dw_desc *desc;
108         dma_addr_t phys;
109
110         desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
111         if (!desc)
112                 return NULL;
113
114         dwc->descs_allocated++;
115         INIT_LIST_HEAD(&desc->tx_list);
116         dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
117         desc->txd.tx_submit = dwc_tx_submit;
118         desc->txd.flags = DMA_CTRL_ACK;
119         desc->txd.phys = phys;
120         return desc;
121 }
122
123 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
124 {
125         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
126         struct dw_desc *child, *_next;
127
128         if (unlikely(!desc))
129                 return;
130
131         list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
132                 list_del(&child->desc_node);
133                 dma_pool_free(dw->desc_pool, child, child->txd.phys);
134                 dwc->descs_allocated--;
135         }
136
137         dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
138         dwc->descs_allocated--;
139 }
140
141 static void dwc_initialize(struct dw_dma_chan *dwc)
142 {
143         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
144
145         if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
146                 return;
147
148         dw->initialize_chan(dwc);
149
150         /* Enable interrupts */
151         channel_set_bit(dw, MASK.XFER, dwc->mask);
152         channel_set_bit(dw, MASK.ERROR, dwc->mask);
153
154         set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
155 }
156
157 /*----------------------------------------------------------------------*/
158
159 static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
160 {
161         dev_err(chan2dev(&dwc->chan),
162                 "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
163                 channel_readl(dwc, SAR),
164                 channel_readl(dwc, DAR),
165                 channel_readl(dwc, LLP),
166                 channel_readl(dwc, CTL_HI),
167                 channel_readl(dwc, CTL_LO));
168 }
169
170 static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
171 {
172         channel_clear_bit(dw, CH_EN, dwc->mask);
173         while (dma_readl(dw, CH_EN) & dwc->mask)
174                 cpu_relax();
175 }
176
177 /*----------------------------------------------------------------------*/
178
179 /* Perform single block transfer */
180 static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
181                                        struct dw_desc *desc)
182 {
183         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
184         u32             ctllo;
185
186         /*
187          * Software emulation of LLP mode relies on interrupts to continue
188          * a multi-block transfer.
189          */
190         ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;
191
192         channel_writel(dwc, SAR, lli_read(desc, sar));
193         channel_writel(dwc, DAR, lli_read(desc, dar));
194         channel_writel(dwc, CTL_LO, ctllo);
195         channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
196         channel_set_bit(dw, CH_EN, dwc->mask);
197
198         /* Move pointer to next descriptor */
199         dwc->tx_node_active = dwc->tx_node_active->next;
200 }
201
202 /* Called with dwc->lock held and bh disabled */
203 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
204 {
205         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
206         u8              lms = DWC_LLP_LMS(dwc->dws.m_master);
207         unsigned long   was_soft_llp;
208
209         /* ASSERT:  channel is idle */
210         if (dma_readl(dw, CH_EN) & dwc->mask) {
211                 dev_err(chan2dev(&dwc->chan),
212                         "%s: BUG: Attempted to start non-idle channel\n",
213                         __func__);
214                 dwc_dump_chan_regs(dwc);
215
216                 /* The tasklet will hopefully advance the queue... */
217                 return;
218         }
219
220         if (dwc->nollp) {
221                 was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
222                                                 &dwc->flags);
223                 if (was_soft_llp) {
224                         dev_err(chan2dev(&dwc->chan),
225                                 "BUG: Attempted to start new LLP transfer inside ongoing one\n");
226                         return;
227                 }
228
229                 dwc_initialize(dwc);
230
231                 first->residue = first->total_len;
232                 dwc->tx_node_active = &first->tx_list;
233
234                 /* Submit first block */
235                 dwc_do_single_block(dwc, first);
236
237                 return;
238         }
239
240         dwc_initialize(dwc);
241
242         channel_writel(dwc, LLP, first->txd.phys | lms);
243         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
244         channel_writel(dwc, CTL_HI, 0);
245         channel_set_bit(dw, CH_EN, dwc->mask);
246 }
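/*
 * dwc_dostart() above has two paths: channels with hardware multi-block
 * support get the whole chain at once through the LLP register, while
 * "nollp" channels fall back to software LLP emulation, where each block is
 * programmed individually via dwc_do_single_block() and the next block is
 * kicked off from the XFER interrupt (see dwc_scan_descriptors()).
 */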
247
248 static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
249 {
250         struct dw_desc *desc;
251
252         if (list_empty(&dwc->queue))
253                 return;
254
255         list_move(dwc->queue.next, &dwc->active_list);
256         desc = dwc_first_active(dwc);
257         dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
258         dwc_dostart(dwc, desc);
259 }
260
261 /*----------------------------------------------------------------------*/
262
263 static void
264 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
265                 bool callback_required)
266 {
267         struct dma_async_tx_descriptor  *txd = &desc->txd;
268         struct dw_desc                  *child;
269         unsigned long                   flags;
270         struct dmaengine_desc_callback  cb;
271
272         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
273
274         spin_lock_irqsave(&dwc->lock, flags);
275         dma_cookie_complete(txd);
276         if (callback_required)
277                 dmaengine_desc_get_callback(txd, &cb);
278         else
279                 memset(&cb, 0, sizeof(cb));
280
281         /* async_tx_ack */
282         list_for_each_entry(child, &desc->tx_list, desc_node)
283                 async_tx_ack(&child->txd);
284         async_tx_ack(&desc->txd);
285         dwc_desc_put(dwc, desc);
286         spin_unlock_irqrestore(&dwc->lock, flags);
287
288         dmaengine_desc_callback_invoke(&cb, NULL);
289 }
290
291 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
292 {
293         struct dw_desc *desc, *_desc;
294         LIST_HEAD(list);
295         unsigned long flags;
296
297         spin_lock_irqsave(&dwc->lock, flags);
298         if (dma_readl(dw, CH_EN) & dwc->mask) {
299                 dev_err(chan2dev(&dwc->chan),
300                         "BUG: XFER bit set, but channel not idle!\n");
301
302                 /* Try to continue after resetting the channel... */
303                 dwc_chan_disable(dw, dwc);
304         }
305
306         /*
307          * Submit queued descriptors ASAP, i.e. before we go through
308          * the completed ones.
309          */
310         list_splice_init(&dwc->active_list, &list);
311         dwc_dostart_first_queued(dwc);
312
313         spin_unlock_irqrestore(&dwc->lock, flags);
314
315         list_for_each_entry_safe(desc, _desc, &list, desc_node)
316                 dwc_descriptor_complete(dwc, desc, true);
317 }
318
319 /* Returns how many bytes were already received from source */
320 static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
321 {
322         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
323         u32 ctlhi = channel_readl(dwc, CTL_HI);
324         u32 ctllo = channel_readl(dwc, CTL_LO);
325
326         return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
327 }
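/*
 * In dwc_get_sent() above, "ctllo >> 4 & 7" extracts the SRC_TR_WIDTH field
 * (bits 6:4 of CTL_LO, the value set by DWC_CTLL_SRC_WIDTH()), so that
 * block2bytes() can convert the block count read from CTL_HI into bytes.
 */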
328
329 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
330 {
331         dma_addr_t llp;
332         struct dw_desc *desc, *_desc;
333         struct dw_desc *child;
334         u32 status_xfer;
335         unsigned long flags;
336
337         spin_lock_irqsave(&dwc->lock, flags);
338         llp = channel_readl(dwc, LLP);
339         status_xfer = dma_readl(dw, RAW.XFER);
340
341         if (status_xfer & dwc->mask) {
342                 /* Everything we've submitted is done */
343                 dma_writel(dw, CLEAR.XFER, dwc->mask);
344
345                 if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
346                         struct list_head *head, *active = dwc->tx_node_active;
347
348                         /*
349                          * We must be inside the first active descriptor;
350                          * otherwise something is really wrong.
351                          */
352                         desc = dwc_first_active(dwc);
353
354                         head = &desc->tx_list;
355                         if (active != head) {
356                                 /* Update residue to reflect last sent descriptor */
357                                 if (active == head->next)
358                                         desc->residue -= desc->len;
359                                 else
360                                         desc->residue -= to_dw_desc(active->prev)->len;
361
362                                 child = to_dw_desc(active);
363
364                                 /* Submit next block */
365                                 dwc_do_single_block(dwc, child);
366
367                                 spin_unlock_irqrestore(&dwc->lock, flags);
368                                 return;
369                         }
370
371                         /* We are done here */
372                         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
373                 }
374
375                 spin_unlock_irqrestore(&dwc->lock, flags);
376
377                 dwc_complete_all(dw, dwc);
378                 return;
379         }
380
381         if (list_empty(&dwc->active_list)) {
382                 spin_unlock_irqrestore(&dwc->lock, flags);
383                 return;
384         }
385
386         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
387                 dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
388                 spin_unlock_irqrestore(&dwc->lock, flags);
389                 return;
390         }
391
392         dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
393
394         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
395                 /* Initial residue value */
396                 desc->residue = desc->total_len;
397
398                 /* Check the first descriptor's address */
399                 if (desc->txd.phys == DWC_LLP_LOC(llp)) {
400                         spin_unlock_irqrestore(&dwc->lock, flags);
401                         return;
402                 }
403
404                 /* Check the first descriptor's llp */
405                 if (lli_read(desc, llp) == llp) {
406                         /* This one is currently in progress */
407                         desc->residue -= dwc_get_sent(dwc);
408                         spin_unlock_irqrestore(&dwc->lock, flags);
409                         return;
410                 }
411
412                 desc->residue -= desc->len;
413                 list_for_each_entry(child, &desc->tx_list, desc_node) {
414                         if (lli_read(child, llp) == llp) {
415                                 /* Currently in progress */
416                                 desc->residue -= dwc_get_sent(dwc);
417                                 spin_unlock_irqrestore(&dwc->lock, flags);
418                                 return;
419                         }
420                         desc->residue -= child->len;
421                 }
422
423                 /*
424                  * No descriptors so far seem to be in progress, i.e.
425                  * this one must be done.
426                  */
427                 spin_unlock_irqrestore(&dwc->lock, flags);
428                 dwc_descriptor_complete(dwc, desc, true);
429                 spin_lock_irqsave(&dwc->lock, flags);
430         }
431
432         dev_err(chan2dev(&dwc->chan),
433                 "BUG: All descriptors done, but channel not idle!\n");
434
435         /* Try to continue after resetting the channel... */
436         dwc_chan_disable(dw, dwc);
437
438         dwc_dostart_first_queued(dwc);
439         spin_unlock_irqrestore(&dwc->lock, flags);
440 }
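/*
 * dwc_scan_descriptors() above figures out how far the hardware has got by
 * comparing the channel's LLP register against the physical address and llp
 * field of each descriptor (and its children) on the active list: the one
 * currently in flight has its residue refined via dwc_get_sent(), while the
 * fully consumed descriptors in front of it are completed.
 */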
441
442 static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
443 {
444         dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
445                  lli_read(desc, sar),
446                  lli_read(desc, dar),
447                  lli_read(desc, llp),
448                  lli_read(desc, ctlhi),
449                  lli_read(desc, ctllo));
450 }
451
452 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
453 {
454         struct dw_desc *bad_desc;
455         struct dw_desc *child;
456         unsigned long flags;
457
458         dwc_scan_descriptors(dw, dwc);
459
460         spin_lock_irqsave(&dwc->lock, flags);
461
462         /*
463          * The descriptor currently at the head of the active list is
464          * borked. Since we don't have any way to report errors, we'll
465          * just have to scream loudly and try to carry on.
466          */
467         bad_desc = dwc_first_active(dwc);
468         list_del_init(&bad_desc->desc_node);
469         list_move(dwc->queue.next, dwc->active_list.prev);
470
471         /* Clear the error flag and try to restart the controller */
472         dma_writel(dw, CLEAR.ERROR, dwc->mask);
473         if (!list_empty(&dwc->active_list))
474                 dwc_dostart(dwc, dwc_first_active(dwc));
475
476         /*
477          * WARN may seem harsh, but since this only happens
478          * when someone submits a bad physical address in a
479          * descriptor, we should consider ourselves lucky that the
480          * controller flagged an error instead of scribbling over
481          * random memory locations.
482          */
483         dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
484                                        "  cookie: %d\n", bad_desc->txd.cookie);
485         dwc_dump_lli(dwc, bad_desc);
486         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
487                 dwc_dump_lli(dwc, child);
488
489         spin_unlock_irqrestore(&dwc->lock, flags);
490
491         /* Pretend the descriptor completed successfully */
492         dwc_descriptor_complete(dwc, bad_desc, true);
493 }
494
495 static void dw_dma_tasklet(unsigned long data)
496 {
497         struct dw_dma *dw = (struct dw_dma *)data;
498         struct dw_dma_chan *dwc;
499         u32 status_xfer;
500         u32 status_err;
501         unsigned int i;
502
503         status_xfer = dma_readl(dw, RAW.XFER);
504         status_err = dma_readl(dw, RAW.ERROR);
505
506         dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
507
508         for (i = 0; i < dw->dma.chancnt; i++) {
509                 dwc = &dw->chan[i];
510                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
511                         dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
512                 else if (status_err & (1 << i))
513                         dwc_handle_error(dw, dwc);
514                 else if (status_xfer & (1 << i))
515                         dwc_scan_descriptors(dw, dwc);
516         }
517
518         /* Re-enable interrupts */
519         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
520         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
521 }
522
523 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
524 {
525         struct dw_dma *dw = dev_id;
526         u32 status;
527
528         /* If the DMAC is not in use, this interrupt cannot be ours */
529         if (!dw->in_use)
530                 return IRQ_NONE;
531
532         status = dma_readl(dw, STATUS_INT);
533         dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);
534
535         /* Check if we have any interrupt from the DMAC */
536         if (!status)
537                 return IRQ_NONE;
538
539         /*
540          * Just disable the interrupts. We'll turn them back on in the
541          * softirq handler.
542          */
543         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
544         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
545         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
546
547         status = dma_readl(dw, STATUS_INT);
548         if (status) {
549                 dev_err(dw->dma.dev,
550                         "BUG: Unexpected interrupts pending: 0x%x\n",
551                         status);
552
553                 /* Try to recover */
554                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
555                 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
556                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
557                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
558                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
559         }
560
561         tasklet_schedule(&dw->tasklet);
562
563         return IRQ_HANDLED;
564 }
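/*
 * Interrupt handling is split in two: dw_dma_interrupt() above only masks
 * the per-channel interrupt sources and schedules the tasklet, while
 * dw_dma_tasklet() sorts out completions and errors for each channel and
 * re-enables the XFER and ERROR masks once it is done.
 */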
565
566 /*----------------------------------------------------------------------*/
567
568 static struct dma_async_tx_descriptor *
569 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
570                 size_t len, unsigned long flags)
571 {
572         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
573         struct dw_dma           *dw = to_dw_dma(chan->device);
574         struct dw_desc          *desc;
575         struct dw_desc          *first;
576         struct dw_desc          *prev;
577         size_t                  xfer_count;
578         size_t                  offset;
579         u8                      m_master = dwc->dws.m_master;
580         unsigned int            src_width;
581         unsigned int            dst_width;
582         unsigned int            data_width = dw->pdata->data_width[m_master];
583         u32                     ctllo, ctlhi;
584         u8                      lms = DWC_LLP_LMS(m_master);
585
586         dev_vdbg(chan2dev(chan),
587                         "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
588                         &dest, &src, len, flags);
589
590         if (unlikely(!len)) {
591                 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
592                 return NULL;
593         }
594
595         dwc->direction = DMA_MEM_TO_MEM;
596
597         src_width = dst_width = __ffs(data_width | src | dest | len);
598
599         ctllo = DWC_DEFAULT_CTLLO(chan)
600                         | DWC_CTLL_DST_WIDTH(dst_width)
601                         | DWC_CTLL_SRC_WIDTH(src_width)
602                         | DWC_CTLL_DST_INC
603                         | DWC_CTLL_SRC_INC
604                         | DWC_CTLL_FC_M2M;
605         prev = first = NULL;
606
607         for (offset = 0; offset < len; offset += xfer_count) {
608                 desc = dwc_desc_get(dwc);
609                 if (!desc)
610                         goto err_desc_get;
611
612                 ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);
613
614                 lli_write(desc, sar, src + offset);
615                 lli_write(desc, dar, dest + offset);
616                 lli_write(desc, ctllo, ctllo);
617                 lli_write(desc, ctlhi, ctlhi);
618                 desc->len = xfer_count;
619
620                 if (!first) {
621                         first = desc;
622                 } else {
623                         lli_write(prev, llp, desc->txd.phys | lms);
624                         list_add_tail(&desc->desc_node, &first->tx_list);
625                 }
626                 prev = desc;
627         }
628
629         if (flags & DMA_PREP_INTERRUPT)
630                 /* Trigger interrupt after last block */
631                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
632
633         prev->lli.llp = 0;
634         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
635         first->txd.flags = flags;
636         first->total_len = len;
637
638         return &first->txd;
639
640 err_desc_get:
641         dwc_desc_put(dwc, first);
642         return NULL;
643 }
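/*
 * Note on dwc_prep_dma_memcpy() above: the copy is chopped into a chain of
 * hardware blocks by bytes2block(), each limited by the channel's block
 * size, and the transfer width is the largest power-of-two size that the
 * source address, destination address, length and master data width are all
 * aligned to, which is what __ffs(data_width | src | dest | len) computes.
 */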
644
645 static struct dma_async_tx_descriptor *
646 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
647                 unsigned int sg_len, enum dma_transfer_direction direction,
648                 unsigned long flags, void *context)
649 {
650         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
651         struct dw_dma           *dw = to_dw_dma(chan->device);
652         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
653         struct dw_desc          *prev;
654         struct dw_desc          *first;
655         u32                     ctllo, ctlhi;
656         u8                      m_master = dwc->dws.m_master;
657         u8                      lms = DWC_LLP_LMS(m_master);
658         dma_addr_t              reg;
659         unsigned int            reg_width;
660         unsigned int            mem_width;
661         unsigned int            data_width = dw->pdata->data_width[m_master];
662         unsigned int            i;
663         struct scatterlist      *sg;
664         size_t                  total_len = 0;
665
666         dev_vdbg(chan2dev(chan), "%s\n", __func__);
667
668         if (unlikely(!is_slave_direction(direction) || !sg_len))
669                 return NULL;
670
671         dwc->direction = direction;
672
673         prev = first = NULL;
674
675         switch (direction) {
676         case DMA_MEM_TO_DEV:
677                 reg_width = __ffs(sconfig->dst_addr_width);
678                 reg = sconfig->dst_addr;
679                 ctllo = (DWC_DEFAULT_CTLLO(chan)
680                                 | DWC_CTLL_DST_WIDTH(reg_width)
681                                 | DWC_CTLL_DST_FIX
682                                 | DWC_CTLL_SRC_INC);
683
684                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
685                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
686
687                 for_each_sg(sgl, sg, sg_len, i) {
688                         struct dw_desc  *desc;
689                         u32             len, mem;
690                         size_t          dlen;
691
692                         mem = sg_dma_address(sg);
693                         len = sg_dma_len(sg);
694
695                         mem_width = __ffs(data_width | mem | len);
696
697 slave_sg_todev_fill_desc:
698                         desc = dwc_desc_get(dwc);
699                         if (!desc)
700                                 goto err_desc_get;
701
702                         ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);
703
704                         lli_write(desc, sar, mem);
705                         lli_write(desc, dar, reg);
706                         lli_write(desc, ctlhi, ctlhi);
707                         lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
708                         desc->len = dlen;
709
710                         if (!first) {
711                                 first = desc;
712                         } else {
713                                 lli_write(prev, llp, desc->txd.phys | lms);
714                                 list_add_tail(&desc->desc_node, &first->tx_list);
715                         }
716                         prev = desc;
717
718                         mem += dlen;
719                         len -= dlen;
720                         total_len += dlen;
721
722                         if (len)
723                                 goto slave_sg_todev_fill_desc;
724                 }
725                 break;
726         case DMA_DEV_TO_MEM:
727                 reg_width = __ffs(sconfig->src_addr_width);
728                 reg = sconfig->src_addr;
729                 ctllo = (DWC_DEFAULT_CTLLO(chan)
730                                 | DWC_CTLL_SRC_WIDTH(reg_width)
731                                 | DWC_CTLL_DST_INC
732                                 | DWC_CTLL_SRC_FIX);
733
734                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
735                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
736
737                 for_each_sg(sgl, sg, sg_len, i) {
738                         struct dw_desc  *desc;
739                         u32             len, mem;
740                         size_t          dlen;
741
742                         mem = sg_dma_address(sg);
743                         len = sg_dma_len(sg);
744
745 slave_sg_fromdev_fill_desc:
746                         desc = dwc_desc_get(dwc);
747                         if (!desc)
748                                 goto err_desc_get;
749
750                         ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);
751
752                         lli_write(desc, sar, reg);
753                         lli_write(desc, dar, mem);
754                         lli_write(desc, ctlhi, ctlhi);
755                         mem_width = __ffs(data_width | mem | dlen);
756                         lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
757                         desc->len = dlen;
758
759                         if (!first) {
760                                 first = desc;
761                         } else {
762                                 lli_write(prev, llp, desc->txd.phys | lms);
763                                 list_add_tail(&desc->desc_node, &first->tx_list);
764                         }
765                         prev = desc;
766
767                         mem += dlen;
768                         len -= dlen;
769                         total_len += dlen;
770
771                         if (len)
772                                 goto slave_sg_fromdev_fill_desc;
773                 }
774                 break;
775         default:
776                 return NULL;
777         }
778
779         if (flags & DMA_PREP_INTERRUPT)
780                 /* Trigger interrupt after last block */
781                 lli_set(prev, ctllo, DWC_CTLL_INT_EN);
782
783         prev->lli.llp = 0;
784         lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
785         first->total_len = total_len;
786
787         return &first->txd;
788
789 err_desc_get:
790         dev_err(chan2dev(chan),
791                 "not enough descriptors available. Direction %d\n", direction);
792         dwc_desc_put(dwc, first);
793         return NULL;
794 }
795
796 bool dw_dma_filter(struct dma_chan *chan, void *param)
797 {
798         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
799         struct dw_dma_slave *dws = param;
800
801         if (dws->dma_dev != chan->device->dev)
802                 return false;
803
804         /* We have to copy data since dws can be temporary storage */
805         memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));
806
807         return true;
808 }
809 EXPORT_SYMBOL_GPL(dw_dma_filter);
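/*
 * Rough sketch of how a client is expected to use dw_dma_filter(); this is
 * hypothetical caller code, not part of this driver ("dmac_dev" and the
 * request-line numbers are placeholders).  The dw_dma_slave passed in may
 * live on the caller's stack, which is why the filter copies it into
 * dwc->dws above:
 *
 *	struct dw_dma_slave dws = {
 *		.dma_dev  = dmac_dev,	// must match chan->device->dev
 *		.src_id   = 0,		// handshake IDs, values illustrative
 *		.dst_id   = 1,
 *		.m_master = 0,		// memory-side AHB master
 *		.p_master = 1,		// peripheral-side AHB master
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &dws);
 */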
810
811 static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
812 {
813         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
814         struct dw_dma *dw = to_dw_dma(chan->device);
815
816         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
817
818         dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
819         dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);
820
821         return 0;
822 }
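/*
 * For reference, clients drive the callbacks above through the generic
 * dmaengine API.  A hedged sketch of a mem-to-dev setup (hypothetical
 * client code; fifo_phys, sgl, nents, done_fn and chan are placeholders,
 * error handling omitted):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr       = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst   = 8,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	desc->callback = done_fn;
 *	dmaengine_submit(desc);			// ends up in dwc_tx_submit()
 *	dma_async_issue_pending(chan);		// ends up in dwc_issue_pending()
 */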
823
824 static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
825 {
826         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
827         unsigned int            count = 20;     /* timeout iterations */
828
829         dw->suspend_chan(dwc, drain);
830
831         while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
832                 udelay(2);
833
834         set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
835 }
836
837 static int dwc_pause(struct dma_chan *chan)
838 {
839         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
840         unsigned long           flags;
841
842         spin_lock_irqsave(&dwc->lock, flags);
843         dwc_chan_pause(dwc, false);
844         spin_unlock_irqrestore(&dwc->lock, flags);
845
846         return 0;
847 }
848
849 static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
850 {
851         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
852
853         dw->resume_chan(dwc, drain);
854
855         clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
856 }
857
858 static int dwc_resume(struct dma_chan *chan)
859 {
860         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
861         unsigned long           flags;
862
863         spin_lock_irqsave(&dwc->lock, flags);
864
865         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
866                 dwc_chan_resume(dwc, false);
867
868         spin_unlock_irqrestore(&dwc->lock, flags);
869
870         return 0;
871 }
872
873 static int dwc_terminate_all(struct dma_chan *chan)
874 {
875         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
876         struct dw_dma           *dw = to_dw_dma(chan->device);
877         struct dw_desc          *desc, *_desc;
878         unsigned long           flags;
879         LIST_HEAD(list);
880
881         spin_lock_irqsave(&dwc->lock, flags);
882
883         clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
884
885         dwc_chan_pause(dwc, true);
886
887         dwc_chan_disable(dw, dwc);
888
889         dwc_chan_resume(dwc, true);
890
891         /* active_list entries will end up before queued entries */
892         list_splice_init(&dwc->queue, &list);
893         list_splice_init(&dwc->active_list, &list);
894
895         spin_unlock_irqrestore(&dwc->lock, flags);
896
897         /* Flush all pending and queued descriptors */
898         list_for_each_entry_safe(desc, _desc, &list, desc_node)
899                 dwc_descriptor_complete(dwc, desc, false);
900
901         return 0;
902 }
903
904 static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
905 {
906         struct dw_desc *desc;
907
908         list_for_each_entry(desc, &dwc->active_list, desc_node)
909                 if (desc->txd.cookie == c)
910                         return desc;
911
912         return NULL;
913 }
914
915 static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
916 {
917         struct dw_desc *desc;
918         unsigned long flags;
919         u32 residue;
920
921         spin_lock_irqsave(&dwc->lock, flags);
922
923         desc = dwc_find_desc(dwc, cookie);
924         if (desc) {
925                 if (desc == dwc_first_active(dwc)) {
926                         residue = desc->residue;
927                         if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
928                                 residue -= dwc_get_sent(dwc);
929                 } else {
930                         residue = desc->total_len;
931                 }
932         } else {
933                 residue = 0;
934         }
935
936         spin_unlock_irqrestore(&dwc->lock, flags);
937         return residue;
938 }
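/*
 * Residue reporting above: only the descriptor at the head of active_list
 * has a meaningful desc->residue (kept up to date by dwc_scan_descriptors());
 * any other matching descriptor has not started yet, so its full total_len
 * is reported.  In soft LLP mode the progress of the current block is read
 * back from the hardware via dwc_get_sent().
 */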
939
940 static enum dma_status
941 dwc_tx_status(struct dma_chan *chan,
942               dma_cookie_t cookie,
943               struct dma_tx_state *txstate)
944 {
945         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
946         enum dma_status         ret;
947
948         ret = dma_cookie_status(chan, cookie, txstate);
949         if (ret == DMA_COMPLETE)
950                 return ret;
951
952         dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
953
954         ret = dma_cookie_status(chan, cookie, txstate);
955         if (ret == DMA_COMPLETE)
956                 return ret;
957
958         dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
959
960         if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
961                 return DMA_PAUSED;
962
963         return ret;
964 }
965
966 static void dwc_issue_pending(struct dma_chan *chan)
967 {
968         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
969         unsigned long           flags;
970
971         spin_lock_irqsave(&dwc->lock, flags);
972         if (list_empty(&dwc->active_list))
973                 dwc_dostart_first_queued(dwc);
974         spin_unlock_irqrestore(&dwc->lock, flags);
975 }
976
977 /*----------------------------------------------------------------------*/
978
979 void do_dw_dma_off(struct dw_dma *dw)
980 {
981         unsigned int i;
982
983         dma_writel(dw, CFG, 0);
984
985         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
986         channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
987         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
988         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
989         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
990
991         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
992                 cpu_relax();
993
994         for (i = 0; i < dw->dma.chancnt; i++)
995                 clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
996 }
997
998 void do_dw_dma_on(struct dw_dma *dw)
999 {
1000         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1001 }
1002
1003 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1004 {
1005         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1006         struct dw_dma           *dw = to_dw_dma(chan->device);
1007
1008         dev_vdbg(chan2dev(chan), "%s\n", __func__);
1009
1010         /* ASSERT:  channel is idle */
1011         if (dma_readl(dw, CH_EN) & dwc->mask) {
1012                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1013                 return -EIO;
1014         }
1015
1016         dma_cookie_init(chan);
1017
1018         /*
1019          * NOTE: some controllers may have additional features that we
1020          * need to initialize here, like "scatter-gather" (which
1021          * doesn't mean what you think it means), and status writeback.
1022          */
1023
1024         /*
1025          * We need controller-specific data to set up slave transfers.
1026          */
1027         if (chan->private && !dw_dma_filter(chan, chan->private)) {
1028                 dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
1029                 return -EINVAL;
1030         }
1031
1032         /* Enable controller here if needed */
1033         if (!dw->in_use)
1034                 do_dw_dma_on(dw);
1035         dw->in_use |= dwc->mask;
1036
1037         return 0;
1038 }
1039
1040 static void dwc_free_chan_resources(struct dma_chan *chan)
1041 {
1042         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1043         struct dw_dma           *dw = to_dw_dma(chan->device);
1044         unsigned long           flags;
1045         LIST_HEAD(list);
1046
1047         dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
1048                         dwc->descs_allocated);
1049
1050         /* ASSERT:  channel is idle */
1051         BUG_ON(!list_empty(&dwc->active_list));
1052         BUG_ON(!list_empty(&dwc->queue));
1053         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1054
1055         spin_lock_irqsave(&dwc->lock, flags);
1056
1057         /* Clear custom channel configuration */
1058         memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
1059
1060         clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
1061
1062         /* Disable interrupts */
1063         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1064         channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
1065         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1066
1067         spin_unlock_irqrestore(&dwc->lock, flags);
1068
1069         /* Disable the controller in case this was the last user */
1070         dw->in_use &= ~dwc->mask;
1071         if (!dw->in_use)
1072                 do_dw_dma_off(dw);
1073
1074         dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
1075 }
1076
1077 int do_dma_probe(struct dw_dma_chip *chip)
1078 {
1079         struct dw_dma *dw = chip->dw;
1080         struct dw_dma_platform_data *pdata;
1081         bool                    autocfg = false;
1082         unsigned int            dw_params;
1083         unsigned int            i;
1084         int                     err;
1085
1086         dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
1087         if (!dw->pdata)
1088                 return -ENOMEM;
1089
1090         dw->regs = chip->regs;
1091
1092         pm_runtime_get_sync(chip->dev);
1093
1094         if (!chip->pdata) {
1095                 dw_params = dma_readl(dw, DW_PARAMS);
1096                 dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);
1097
1098                 autocfg = dw_params >> DW_PARAMS_EN & 1;
1099                 if (!autocfg) {
1100                         err = -EINVAL;
1101                         goto err_pdata;
1102                 }
1103
1104                 /* Reassign the platform data pointer */
1105                 pdata = dw->pdata;
1106
1107                 /* Get hardware configuration parameters */
1108                 pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
1109                 pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
1110                 for (i = 0; i < pdata->nr_masters; i++) {
1111                         pdata->data_width[i] =
1112                                 4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
1113                 }
1114                 pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);
1115
1116                 /* Fill platform data with the default values */
1117                 pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
1118                 pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
1119         } else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
1120                 err = -EINVAL;
1121                 goto err_pdata;
1122         } else {
1123                 memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));
1124
1125                 /* Reassign the platform data pointer */
1126                 pdata = dw->pdata;
1127         }
1128
1129         dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
1130                                 GFP_KERNEL);
1131         if (!dw->chan) {
1132                 err = -ENOMEM;
1133                 goto err_pdata;
1134         }
1135
1136         /* Calculate the all-channel mask before DMA setup */
1137         dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1138
1139         /* Force dma off, just in case */
1140         dw->disable(dw);
1141
1142         /* Device and instance ID for IRQ and DMA pool */
1143         dw->set_device_name(dw, chip->id);
1144
1145         /* Create a pool of consistent memory blocks for hardware descriptors */
1146         dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
1147                                          sizeof(struct dw_desc), 4, 0);
1148         if (!dw->desc_pool) {
1149                 dev_err(chip->dev, "No memory for descriptors dma pool\n");
1150                 err = -ENOMEM;
1151                 goto err_pdata;
1152         }
1153
1154         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1155
1156         err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
1157                           dw->name, dw);
1158         if (err)
1159                 goto err_pdata;
1160
1161         INIT_LIST_HEAD(&dw->dma.channels);
1162         for (i = 0; i < pdata->nr_channels; i++) {
1163                 struct dw_dma_chan      *dwc = &dw->chan[i];
1164
1165                 dwc->chan.device = &dw->dma;
1166                 dma_cookie_init(&dwc->chan);
1167                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1168                         list_add_tail(&dwc->chan.device_node,
1169                                         &dw->dma.channels);
1170                 else
1171                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1172
1173                 /* 7 is highest priority & 0 is lowest. */
1174                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1175                         dwc->priority = pdata->nr_channels - i - 1;
1176                 else
1177                         dwc->priority = i;
1178
1179                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1180                 spin_lock_init(&dwc->lock);
1181                 dwc->mask = 1 << i;
1182
1183                 INIT_LIST_HEAD(&dwc->active_list);
1184                 INIT_LIST_HEAD(&dwc->queue);
1185
1186                 channel_clear_bit(dw, CH_EN, dwc->mask);
1187
1188                 dwc->direction = DMA_TRANS_NONE;
1189
1190                 /* Hardware configuration */
1191                 if (autocfg) {
1192                         unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
1193                         void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
1194                         unsigned int dwc_params = readl(addr);
1195
1196                         dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
1197                                            dwc_params);
1198
1199                         /*
1200                          * Decode the maximum block size for the given channel.
1201                          * The stored 4-bit value encodes the size: 0x00 means 3,
1202                          * up to 0x0a which means 4095.
1203                          */
1204                         dwc->block_size =
1205                                 (4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
1206                         dwc->nollp =
1207                                 (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
1208                 } else {
1209                         dwc->block_size = pdata->block_size;
1210                         dwc->nollp = !pdata->multi_block[i];
1211                 }
1212         }
1213
1214         /* Clear all interrupts on all channels. */
1215         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1216         dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
1217         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1218         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1219         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1220
1221         /* Set capabilities */
1222         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1223         dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1224         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1225
1226         dw->dma.dev = chip->dev;
1227         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1228         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1229
1230         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1231         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1232
1233         dw->dma.device_config = dwc_config;
1234         dw->dma.device_pause = dwc_pause;
1235         dw->dma.device_resume = dwc_resume;
1236         dw->dma.device_terminate_all = dwc_terminate_all;
1237
1238         dw->dma.device_tx_status = dwc_tx_status;
1239         dw->dma.device_issue_pending = dwc_issue_pending;
1240
1241         /* DMA capabilities */
1242         dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
1243         dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
1244         dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
1245                              BIT(DMA_MEM_TO_MEM);
1246         dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1247
1248         err = dma_async_device_register(&dw->dma);
1249         if (err)
1250                 goto err_dma_register;
1251
1252         dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
1253                  pdata->nr_channels);
1254
1255         pm_runtime_put_sync_suspend(chip->dev);
1256
1257         return 0;
1258
1259 err_dma_register:
1260         free_irq(chip->irq, dw);
1261 err_pdata:
1262         pm_runtime_put_sync_suspend(chip->dev);
1263         return err;
1264 }
1265
1266 int do_dma_remove(struct dw_dma_chip *chip)
1267 {
1268         struct dw_dma           *dw = chip->dw;
1269         struct dw_dma_chan      *dwc, *_dwc;
1270
1271         pm_runtime_get_sync(chip->dev);
1272
1273         do_dw_dma_off(dw);
1274         dma_async_device_unregister(&dw->dma);
1275
1276         free_irq(chip->irq, dw);
1277         tasklet_kill(&dw->tasklet);
1278
1279         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1280                         chan.device_node) {
1281                 list_del(&dwc->chan.device_node);
1282                 channel_clear_bit(dw, CH_EN, dwc->mask);
1283         }
1284
1285         pm_runtime_put_sync_suspend(chip->dev);
1286         return 0;
1287 }
1288
1289 int do_dw_dma_disable(struct dw_dma_chip *chip)
1290 {
1291         struct dw_dma *dw = chip->dw;
1292
1293         dw->disable(dw);
1294         return 0;
1295 }
1296 EXPORT_SYMBOL_GPL(do_dw_dma_disable);
1297
1298 int do_dw_dma_enable(struct dw_dma_chip *chip)
1299 {
1300         struct dw_dma *dw = chip->dw;
1301
1302         dw->enable(dw);
1303         return 0;
1304 }
1305 EXPORT_SYMBOL_GPL(do_dw_dma_enable);
1306
1307 MODULE_LICENSE("GPL v2");
1308 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
1309 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1310 MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");