asedeno.scripts.mit.edu Git - linux.git/commitdiff
dmaengine: ensure dmaengine helpers check valid callback
authorVinod Koul <vinod.koul@intel.com>
Tue, 12 Apr 2016 15:37:06 +0000 (21:07 +0530)
committerVinod Koul <vinod.koul@intel.com>
Tue, 12 Apr 2016 15:37:06 +0000 (21:07 +0530)
dmaengine has various device callbacks and exposes helper
functions to invoke these. These helpers should check that the
channel, device, and callback are valid before invoking them.

Reported-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
include/linux/dmaengine.h

index 017433712833c99dcb0711338d8909cb73ed2df3..30de0197263a573e3ce0df09bdad1277d933029e 100644 (file)
@@ -804,6 +804,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
        sg_dma_address(&sg) = buf;
        sg_dma_len(&sg) = len;
 
+       if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+               return NULL;
+
        return chan->device->device_prep_slave_sg(chan, &sg, 1,
                                                  dir, flags, NULL);
 }
@@ -812,6 +815,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction dir, unsigned long flags)
 {
+       if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+               return NULL;
+
        return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  dir, flags, NULL);
 }
@@ -823,6 +829,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
        enum dma_transfer_direction dir, unsigned long flags,
        struct rio_dma_ext *rio_ext)
 {
+       if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+               return NULL;
+
        return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  dir, flags, rio_ext);
 }
@@ -833,6 +842,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
                size_t period_len, enum dma_transfer_direction dir,
                unsigned long flags)
 {
+       if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+               return NULL;
+
        return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
                                                period_len, dir, flags);
 }
@@ -841,6 +853,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
                struct dma_chan *chan, struct dma_interleaved_template *xt,
                unsigned long flags)
 {
+       if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+               return NULL;
+
        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
 }
 
@@ -848,7 +863,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
                struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
                unsigned long flags)
 {
-       if (!chan || !chan->device)
+       if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
                return NULL;
 
        return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -861,6 +876,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
                struct scatterlist *src_sg, unsigned int src_nents,
                unsigned long flags)
 {
+       if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+               return NULL;
+
        return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
                        src_sg, src_nents, flags);
 }