Merge branch 'topic/api_caps' into for-linus
author    Vinod Koul <vinod.koul@intel.com>
          Mon, 2 Sep 2013 12:10:40 +0000 (17:40 +0530)
committer Vinod Koul <vinod.koul@intel.com>
          Mon, 2 Sep 2013 12:10:40 +0000 (17:40 +0530)
drivers/dma/pl330.c
include/linux/dmaengine.h

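This merge brings in the slave-capabilities query API from the topic branch (struct dma_slave_caps, the device_slave_caps callback and the dma_get_slave_caps() helper in dmaengine.h) together with the pl330 rework that moves completed descriptors onto a per-channel completed_list. As a rough client-side illustration of the new API, the sketch below assumes a hypothetical caller that already holds a slave dma_chan; it is not part of this commit, only the dma_get_slave_caps() signature and the struct fields come from the diff.

	/* Hypothetical client-side use of the new capability query (not part
	 * of this commit): ask the channel what it supports before using it. */
	#include <linux/dmaengine.h>

	static int check_slave_channel(struct dma_chan *chan)
	{
		struct dma_slave_caps caps;
		int ret;

		ret = dma_get_slave_caps(chan, &caps);
		if (ret)
			return ret;	/* not a slave channel, or caps not reported */

		/* e.g. require 4-byte bus width support and a terminate command */
		if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
			return -EINVAL;
		if (!caps.cmd_terminate)
			return -EINVAL;

		return 0;
	}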
diff --combined drivers/dma/pl330.c
index cfd2d703fcb5614f9433ac8696107cc2f88cbac5,7c02e83c7308768f8846f1593e2404c7551186c2..36ed30116ee069f71ee652e29bcf8b05de89c665
@@@ -545,8 -545,6 +545,8 @@@ struct dma_pl330_chan 
  
        /* List of to be xfered descriptors */
        struct list_head work_list;
 +      /* List of completed descriptors */
 +      struct list_head completed_list;
  
        /* Pointer to the DMAC that manages this channel,
         * NULL if the channel is available to be acquired.
@@@ -2200,6 -2198,66 +2200,6 @@@ to_desc(struct dma_async_tx_descriptor 
        return container_of(tx, struct dma_pl330_desc, txd);
  }
  
 -static inline void free_desc_list(struct list_head *list)
 -{
 -      struct dma_pl330_dmac *pdmac;
 -      struct dma_pl330_desc *desc;
 -      struct dma_pl330_chan *pch = NULL;
 -      unsigned long flags;
 -
 -      /* Finish off the work list */
 -      list_for_each_entry(desc, list, node) {
 -              dma_async_tx_callback callback;
 -              void *param;
 -
 -              /* All desc in a list belong to same channel */
 -              pch = desc->pchan;
 -              callback = desc->txd.callback;
 -              param = desc->txd.callback_param;
 -
 -              if (callback)
 -                      callback(param);
 -
 -              desc->pchan = NULL;
 -      }
 -
 -      /* pch will be unset if list was empty */
 -      if (!pch)
 -              return;
 -
 -      pdmac = pch->dmac;
 -
 -      spin_lock_irqsave(&pdmac->pool_lock, flags);
 -      list_splice_tail_init(list, &pdmac->desc_pool);
 -      spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 -}
 -
 -static inline void handle_cyclic_desc_list(struct list_head *list)
 -{
 -      struct dma_pl330_desc *desc;
 -      struct dma_pl330_chan *pch = NULL;
 -      unsigned long flags;
 -
 -      list_for_each_entry(desc, list, node) {
 -              dma_async_tx_callback callback;
 -
 -              /* Change status to reload it */
 -              desc->status = PREP;
 -              pch = desc->pchan;
 -              callback = desc->txd.callback;
 -              if (callback)
 -                      callback(desc->txd.callback_param);
 -      }
 -
 -      /* pch will be unset if list was empty */
 -      if (!pch)
 -              return;
 -
 -      spin_lock_irqsave(&pch->lock, flags);
 -      list_splice_tail_init(list, &pch->work_list);
 -      spin_unlock_irqrestore(&pch->lock, flags);
 -}
 -
  static inline void fill_queue(struct dma_pl330_chan *pch)
  {
        struct dma_pl330_desc *desc;
@@@ -2233,6 -2291,7 +2233,6 @@@ static void pl330_tasklet(unsigned lon
        struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
        struct dma_pl330_desc *desc, *_dt;
        unsigned long flags;
 -      LIST_HEAD(list);
  
        spin_lock_irqsave(&pch->lock, flags);
  
                if (desc->status == DONE) {
                        if (!pch->cyclic)
                                dma_cookie_complete(&desc->txd);
 -                      list_move_tail(&desc->node, &list);
 +                      list_move_tail(&desc->node, &pch->completed_list);
                }
  
        /* Try to submit a req imm. next to the last completed cookie */
        /* Make sure the PL330 Channel thread is active */
        pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);
  
 -      spin_unlock_irqrestore(&pch->lock, flags);
 +      while (!list_empty(&pch->completed_list)) {
 +              dma_async_tx_callback callback;
 +              void *callback_param;
  
 -      if (pch->cyclic)
 -              handle_cyclic_desc_list(&list);
 -      else
 -              free_desc_list(&list);
 +              desc = list_first_entry(&pch->completed_list,
 +                                      struct dma_pl330_desc, node);
 +
 +              callback = desc->txd.callback;
 +              callback_param = desc->txd.callback_param;
 +
 +              if (pch->cyclic) {
 +                      desc->status = PREP;
 +                      list_move_tail(&desc->node, &pch->work_list);
 +              } else {
 +                      desc->status = FREE;
 +                      list_move_tail(&desc->node, &pch->dmac->desc_pool);
 +              }
 +
 +              if (callback) {
 +                      spin_unlock_irqrestore(&pch->lock, flags);
 +                      callback(callback_param);
 +                      spin_lock_irqsave(&pch->lock, flags);
 +              }
 +      }
 +      spin_unlock_irqrestore(&pch->lock, flags);
  }
  
  static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
@@@ -2369,7 -2409,7 +2369,7 @@@ static int pl330_alloc_chan_resources(s
  static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
  {
        struct dma_pl330_chan *pch = to_pchan(chan);
 -      struct dma_pl330_desc *desc, *_dt;
 +      struct dma_pl330_desc *desc;
        unsigned long flags;
        struct dma_pl330_dmac *pdmac = pch->dmac;
        struct dma_slave_config *slave_config;
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
  
                /* Mark all desc done */
 -              list_for_each_entry_safe(desc, _dt, &pch->work_list , node) {
 -                      desc->status = DONE;
 -                      list_move_tail(&desc->node, &list);
 +              list_for_each_entry(desc, &pch->work_list , node) {
 +                      desc->status = FREE;
 +                      dma_cookie_complete(&desc->txd);
                }
  
 -              list_splice_tail_init(&list, &pdmac->desc_pool);
 +              list_for_each_entry(desc, &pch->completed_list , node) {
 +                      desc->status = FREE;
 +                      dma_cookie_complete(&desc->txd);
 +              }
 +
 +              list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
 +              list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
                break;
        case DMA_SLAVE_CONFIG:
@@@ -2471,10 -2505,6 +2471,10 @@@ static dma_cookie_t pl330_tx_submit(str
        /* Assign cookies to all nodes */
        while (!list_empty(&last->node)) {
                desc = list_entry(last->node.next, struct dma_pl330_desc, node);
 +              if (pch->cyclic) {
 +                      desc->txd.callback = last->txd.callback;
 +                      desc->txd.callback_param = last->txd.callback_param;
 +              }
  
                dma_cookie_assign(&desc->txd);
  
@@@ -2658,82 -2688,45 +2658,82 @@@ static struct dma_async_tx_descriptor *
                size_t period_len, enum dma_transfer_direction direction,
                unsigned long flags, void *context)
  {
 -      struct dma_pl330_desc *desc;
 +      struct dma_pl330_desc *desc = NULL, *first = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
 +      struct dma_pl330_dmac *pdmac = pch->dmac;
 +      unsigned int i;
        dma_addr_t dst;
        dma_addr_t src;
  
 -      desc = pl330_get_desc(pch);
 -      if (!desc) {
 -              dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
 -                      __func__, __LINE__);
 +      if (len % period_len != 0)
                return NULL;
 -      }
  
 -      switch (direction) {
 -      case DMA_MEM_TO_DEV:
 -              desc->rqcfg.src_inc = 1;
 -              desc->rqcfg.dst_inc = 0;
 -              desc->req.rqtype = MEMTODEV;
 -              src = dma_addr;
 -              dst = pch->fifo_addr;
 -              break;
 -      case DMA_DEV_TO_MEM:
 -              desc->rqcfg.src_inc = 0;
 -              desc->rqcfg.dst_inc = 1;
 -              desc->req.rqtype = DEVTOMEM;
 -              src = pch->fifo_addr;
 -              dst = dma_addr;
 -              break;
 -      default:
 +      if (!is_slave_direction(direction)) {
                dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
                __func__, __LINE__);
                return NULL;
        }
  
 -      desc->rqcfg.brst_size = pch->burst_sz;
 -      desc->rqcfg.brst_len = 1;
 +      for (i = 0; i < len / period_len; i++) {
 +              desc = pl330_get_desc(pch);
 +              if (!desc) {
 +                      dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
 +                              __func__, __LINE__);
  
 -      pch->cyclic = true;
 +                      if (!first)
 +                              return NULL;
  
 -      fill_px(&desc->px, dst, src, period_len);
 +                      spin_lock_irqsave(&pdmac->pool_lock, flags);
 +
 +                      while (!list_empty(&first->node)) {
 +                              desc = list_entry(first->node.next,
 +                                              struct dma_pl330_desc, node);
 +                              list_move_tail(&desc->node, &pdmac->desc_pool);
 +                      }
 +
 +                      list_move_tail(&first->node, &pdmac->desc_pool);
 +
 +                      spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 +
 +                      return NULL;
 +              }
 +
 +              switch (direction) {
 +              case DMA_MEM_TO_DEV:
 +                      desc->rqcfg.src_inc = 1;
 +                      desc->rqcfg.dst_inc = 0;
 +                      desc->req.rqtype = MEMTODEV;
 +                      src = dma_addr;
 +                      dst = pch->fifo_addr;
 +                      break;
 +              case DMA_DEV_TO_MEM:
 +                      desc->rqcfg.src_inc = 0;
 +                      desc->rqcfg.dst_inc = 1;
 +                      desc->req.rqtype = DEVTOMEM;
 +                      src = pch->fifo_addr;
 +                      dst = dma_addr;
 +                      break;
 +              default:
 +                      break;
 +              }
 +
 +              desc->rqcfg.brst_size = pch->burst_sz;
 +              desc->rqcfg.brst_len = 1;
 +              fill_px(&desc->px, dst, src, period_len);
 +
 +              if (!first)
 +                      first = desc;
 +              else
 +                      list_add_tail(&desc->node, &first->node);
 +
 +              dma_addr += period_len;
 +      }
 +
 +      if (!desc)
 +              return NULL;
 +
 +      pch->cyclic = true;
 +      desc->txd.flags = flags;
  
        return &desc->txd;
  }
@@@ -2780,28 -2773,6 +2780,28 @@@ pl330_prep_dma_memcpy(struct dma_chan *
        return &desc->txd;
  }
  
 +static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
 +                                struct dma_pl330_desc *first)
 +{
 +      unsigned long flags;
 +      struct dma_pl330_desc *desc;
 +
 +      if (!first)
 +              return;
 +
 +      spin_lock_irqsave(&pdmac->pool_lock, flags);
 +
 +      while (!list_empty(&first->node)) {
 +              desc = list_entry(first->node.next,
 +                              struct dma_pl330_desc, node);
 +              list_move_tail(&desc->node, &pdmac->desc_pool);
 +      }
 +
 +      list_move_tail(&first->node, &pdmac->desc_pool);
 +
 +      spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 +}
 +
  static struct dma_async_tx_descriptor *
  pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
        struct dma_pl330_desc *first, *desc = NULL;
        struct dma_pl330_chan *pch = to_pchan(chan);
        struct scatterlist *sg;
 -      unsigned long flags;
        int i;
        dma_addr_t addr;
  
                        dev_err(pch->dmac->pif.dev,
                                "%s:%d Unable to fetch desc\n",
                                __func__, __LINE__);
 -                      if (!first)
 -                              return NULL;
 -
 -                      spin_lock_irqsave(&pdmac->pool_lock, flags);
 -
 -                      while (!list_empty(&first->node)) {
 -                              desc = list_entry(first->node.next,
 -                                              struct dma_pl330_desc, node);
 -                              list_move_tail(&desc->node, &pdmac->desc_pool);
 -                      }
 -
 -                      list_move_tail(&first->node, &pdmac->desc_pool);
 -
 -                      spin_unlock_irqrestore(&pdmac->pool_lock, flags);
 +                      __pl330_giveback_desc(pdmac, first);
  
                        return NULL;
                }
@@@ -2870,6 -2855,32 +2870,32 @@@ static irqreturn_t pl330_irq_handler(in
                return IRQ_NONE;
  }
  
+ #define PL330_DMA_BUSWIDTHS \
+       BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
+ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+ {
+       caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = false;
+       caps->cmd_terminate = true;
+       /*
+        * This is the limit for transfers with a buswidth of 1, larger
+        * buswidths will have larger limits.
+        */
+       caps->max_sg_len = 1900800;
+       caps->max_sg_nr = 0;
+       return 0;
+ }
  static int
  pl330_probe(struct amba_device *adev, const struct amba_id *id)
  {
        int i, ret, irq;
        int num_chan;
  
 -      pdat = adev->dev.platform_data;
 +      pdat = dev_get_platdata(&adev->dev);
  
        /* Allocate a new DMAC and its Channels */
        pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
                        pch->chan.private = adev->dev.of_node;
  
                INIT_LIST_HEAD(&pch->work_list);
 +              INIT_LIST_HEAD(&pch->completed_list);
                spin_lock_init(&pch->lock);
                pch->pl330_chid = NULL;
                pch->chan.device = pd;
        pd->device_prep_slave_sg = pl330_prep_slave_sg;
        pd->device_control = pl330_control;
        pd->device_issue_pending = pl330_issue_pending;
+       pd->device_slave_caps = pl330_dma_device_slave_caps;
  
        ret = dma_async_device_register(pd);
        if (ret) {
index c271608e862e4a56e292de19240b7f25b506b193,5692bc3afd3929ab96ed38b911b9b0b0576bfa35..13ac4f55322789c3aedb6f88d9be2ccace6f90a4
@@@ -370,6 -370,33 +370,33 @@@ struct dma_slave_config 
        unsigned int slave_id;
  };
  
+ /* struct dma_slave_caps - expose capabilities of a slave channel only
+  *
+  * @src_addr_widths: bit mask of src addr widths the channel supports
+  * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+  * @directions: bit mask of slave direction the channel supported
+  *    since the enum dma_transfer_direction is not defined as bits for each
+  *    type of direction, the dma controller should fill (1 << <TYPE>) and same
+  *    should be checked by controller as well
+  * @cmd_pause: true, if pause and thereby resume is supported
+  * @cmd_terminate: true, if terminate cmd is supported
+  *
+  * @max_sg_nr: maximum number of SG segments supported
+  *    0 for no maximum
+  * @max_sg_len: maximum length of a SG segment supported
+  *    0 for no maximum
+  */
+ struct dma_slave_caps {
+       u32 src_addr_widths;
+       u32 dstn_addr_widths;
+       u32 directions;
+       bool cmd_pause;
+       bool cmd_terminate;
+       u32 max_sg_nr;
+       u32 max_sg_len;
+ };
  static inline const char *dma_chan_name(struct dma_chan *chan)
  {
        return dev_name(&chan->dev->device);
@@@ -532,6 -559,7 +559,7 @@@ struct dma_tx_state 
   *    struct with auxiliary transfer status information, otherwise the call
   *    will just return a simple status code
   * @device_issue_pending: push pending transactions to hardware
+  * @device_slave_caps: return the slave channel capabilities
   */
  struct dma_device {
  
                                            dma_cookie_t cookie,
                                            struct dma_tx_state *txstate);
        void (*device_issue_pending)(struct dma_chan *chan);
+       int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
  };
  
  static inline int dmaengine_device_control(struct dma_chan *chan,
@@@ -670,6 -699,21 +699,21 @@@ static inline struct dma_async_tx_descr
        return chan->device->device_prep_interleaved_dma(chan, xt, flags);
  }
  
+ static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+ {
+       if (!chan || !caps)
+               return -EINVAL;
+       /* check if the channel supports slave transactions */
+       if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+               return -ENXIO;
+       if (chan->device->device_slave_caps)
+               return chan->device->device_slave_caps(chan, caps);
+       return -ENXIO;
+ }
  static inline int dmaengine_terminate_all(struct dma_chan *chan)
  {
        return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
@@@ -995,7 -1039,6 +1039,7 @@@ int dma_async_device_register(struct dm
  void dma_async_device_unregister(struct dma_device *device);
  void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
  struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
 +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
  struct dma_chan *net_dma_find_channel(void);
  #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
  #define dma_request_slave_channel_compat(mask, x, y, dev, name) \
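For comparison with the pl330_dma_device_slave_caps() hunk above, a minimal sketch of how another slave DMA driver might advertise its capabilities through the new callback; only the struct dma_slave_caps field names and the callback signature come from this merge, the foo_ names and the particular widths are illustrative assumptions.

	/* Hypothetical provider-side implementation of device_slave_caps;
	 * the chosen widths and limits are illustrative only. */
	static int foo_dma_slave_caps(struct dma_chan *chan,
				      struct dma_slave_caps *caps)
	{
		caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
		caps->dstn_addr_widths = caps->src_addr_widths;
		caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		caps->cmd_pause = true;		/* pause/resume supported */
		caps->cmd_terminate = true;	/* terminate-all supported */
		caps->max_sg_nr = 0;		/* no limit on segment count */
		caps->max_sg_len = 0;		/* no limit on segment length */
		return 0;
	}

	/* ...and hooked up at probe time, next to the other callbacks: */
	/*	dd->device_slave_caps = foo_dma_slave_caps;	*/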