dmaengine: zynqmp_dma: replace spin_lock_bh with spin_lock_irqsave
author    Michael Tretter <m.tretter@pengutronix.de>
          Mon, 26 Nov 2018 15:14:25 +0000 (16:14 +0100)
committer Vinod Koul <vkoul@kernel.org>
          Wed, 5 Dec 2018 09:07:32 +0000 (14:37 +0530)
All device_prep_dma_* functions and device_issue_pending can be called
from an interrupt context. As this includes hard IRQs, we must use
spin_lock_irqsave() instead of spin_lock_bh() to access chan->lock.

Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/xilinx/zynqmp_dma.c
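
For context, a minimal sketch of the locking pattern this commit adopts, using a hypothetical driver (example_lock and the function name are illustrative, not the zynqmp code): spin_lock_bh() only disables softirqs, so taking chan->lock with it on a path that can also be entered from hard-IRQ context risks deadlock; spin_lock_irqsave() additionally disables local interrupts and restores the saved state on unlock, making the critical section safe from any context.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Callable from process, softirq, or hard-IRQ context. */
static void example_touch_shared_state(void)
{
        unsigned long irqflags;

        /* Disable local interrupts and save the previous IRQ state. */
        spin_lock_irqsave(&example_lock, irqflags);
        /* ... manipulate state shared with an interrupt handler ... */
        spin_unlock_irqrestore(&example_lock, irqflags);
}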

index c74a88b650396e34cb713e069ffb4640060d94f8..6f26b59a7216c7f545066ff4c502a6b431a3943d 100644
@@ -375,9 +375,10 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        struct zynqmp_dma_chan *chan = to_chan(tx->chan);
        struct zynqmp_dma_desc_sw *desc, *new;
        dma_cookie_t cookie;
+       unsigned long irqflags;
 
        new = tx_to_desc(tx);
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        cookie = dma_cookie_assign(tx);
 
        if (!list_empty(&chan->pending_list)) {
@@ -393,7 +394,7 @@ static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
        }
 
        list_add_tail(&new->node, &chan->pending_list);
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 
        return cookie;
 }
@@ -408,12 +409,13 @@ static struct zynqmp_dma_desc_sw *
 zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
 {
        struct zynqmp_dma_desc_sw *desc;
+       unsigned long irqflags;
 
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        desc = list_first_entry(&chan->free_list,
                                struct zynqmp_dma_desc_sw, node);
        list_del(&desc->node);
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 
        INIT_LIST_HEAD(&desc->tx_list);
        /* Clear the src and dst descriptor memory */
@@ -643,10 +645,11 @@ static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
 {
        struct zynqmp_dma_chan *chan = to_chan(dchan);
+       unsigned long irqflags;
 
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        zynqmp_dma_start_transfer(chan);
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -667,10 +670,11 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
        struct zynqmp_dma_chan *chan = to_chan(dchan);
+       unsigned long irqflags;
 
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        zynqmp_dma_free_descriptors(chan);
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
        dma_free_coherent(chan->dev,
                (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
                chan->desc_pool_v, chan->desc_pool_p);
@@ -743,8 +747,9 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 {
        struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
        u32 count;
+       unsigned long irqflags;
 
-       spin_lock(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
 
        if (chan->err) {
                zynqmp_dma_reset(chan);
@@ -764,7 +769,7 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
                zynqmp_dma_start_transfer(chan);
 
 unlock:
-       spin_unlock(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -776,11 +781,12 @@ static void zynqmp_dma_do_tasklet(unsigned long data)
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
        struct zynqmp_dma_chan *chan = to_chan(dchan);
+       unsigned long irqflags;
 
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
        zynqmp_dma_free_descriptors(chan);
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 
        return 0;
 }
@@ -804,19 +810,20 @@ static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
        void *desc = NULL, *prev = NULL;
        size_t copy;
        u32 desc_cnt;
+       unsigned long irqflags;
 
        chan = to_chan(dchan);
 
        desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
 
-       spin_lock_bh(&chan->lock);
+       spin_lock_irqsave(&chan->lock, irqflags);
        if (desc_cnt > chan->desc_free_cnt) {
-               spin_unlock_bh(&chan->lock);
+               spin_unlock_irqrestore(&chan->lock, irqflags);
                dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
                return NULL;
        }
        chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
-       spin_unlock_bh(&chan->lock);
+       spin_unlock_irqrestore(&chan->lock, irqflags);
 
        do {
                /* Allocate and populate the descriptor */