// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015-2016 Marvell International Ltd.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

/* DMA Engine Registers */
#define MV_XOR_V2_DMA_DESQ_BALR_OFF			0x000
#define MV_XOR_V2_DMA_DESQ_BAHR_OFF			0x004
#define MV_XOR_V2_DMA_DESQ_SIZE_OFF			0x008
#define MV_XOR_V2_DMA_DESQ_DONE_OFF			0x00C
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK		0x7FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT		0
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK		0x1FFF
#define   MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT	16
#define MV_XOR_V2_DMA_DESQ_ARATTR_OFF			0x010
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK		0x3F3F
#define   MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE	0x202
#define   MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE		0x3C3C
#define MV_XOR_V2_DMA_IMSG_CDAT_OFF			0x014
#define MV_XOR_V2_DMA_IMSG_THRD_OFF			0x018
#define   MV_XOR_V2_DMA_IMSG_THRD_MASK			0x7FFF
#define   MV_XOR_V2_DMA_IMSG_THRD_SHIFT			0x0
#define   MV_XOR_V2_DMA_IMSG_TIMER_EN			BIT(18)
#define MV_XOR_V2_DMA_DESQ_AWATTR_OFF			0x01C
  /* Same flags as MV_XOR_V2_DMA_DESQ_ARATTR_OFF */
#define MV_XOR_V2_DMA_DESQ_ALLOC_OFF			0x04C
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK		0xFFFF
#define   MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT		16
#define MV_XOR_V2_DMA_IMSG_BALR_OFF			0x050
#define MV_XOR_V2_DMA_IMSG_BAHR_OFF			0x054
#define MV_XOR_V2_DMA_DESQ_CTRL_OFF			0x100
#define   MV_XOR_V2_DMA_DESQ_CTRL_32B			1
#define   MV_XOR_V2_DMA_DESQ_CTRL_128B			7
#define MV_XOR_V2_DMA_DESQ_STOP_OFF			0x800
#define MV_XOR_V2_DMA_DESQ_DEALLOC_OFF			0x804
#define MV_XOR_V2_DMA_DESQ_ADD_OFF			0x808
#define MV_XOR_V2_DMA_IMSG_TMOT				0x810
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK		0x1FFF
#define   MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT		0

/* XOR Global registers */
#define MV_XOR_V2_GLOB_BW_CTRL				0x4
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT	0
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL	64
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT	8
#define   MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL	8
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT	12
#define   MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL	4
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT	16
#define   MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL	4
#define MV_XOR_V2_GLOB_PAUSE				0x014
#define   MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL		0x8
#define MV_XOR_V2_GLOB_SYS_INT_CAUSE			0x200
#define MV_XOR_V2_GLOB_SYS_INT_MASK			0x204
#define MV_XOR_V2_GLOB_MEM_INT_CAUSE			0x220
#define MV_XOR_V2_GLOB_MEM_INT_MASK			0x224

#define MV_XOR_V2_MIN_DESC_SIZE				32
#define MV_XOR_V2_EXT_DESC_SIZE				128

#define MV_XOR_V2_DESC_RESERVED_SIZE			12
#define MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE			12

#define MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF		8

/*
 * Descriptors queue size. With 32 bytes descriptors, up to 2^14
 * descriptors are allowed, with 128 bytes descriptors, up to 2^12
 * descriptors are allowed. This driver uses 128 bytes descriptors,
 * but experimentation has shown that a set of 1024 descriptors is
 * sufficient to reach a good level of performance.
 */
#define MV_XOR_V2_DESC_NUM				1024
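
/*
 * For reference (added note, not from the original source): with the values
 * above, the descriptor ring occupies MV_XOR_V2_DESC_NUM *
 * MV_XOR_V2_EXT_DESC_SIZE = 1024 * 128 bytes = 128 KiB of coherent memory,
 * allocated once in probe() via dma_alloc_coherent().
 */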

/*
 * Threshold values for descriptors and timeout, determined by
 * experimentation as giving a good level of performance.
 */
#define MV_XOR_V2_DONE_IMSG_THRD	0x14
#define MV_XOR_V2_TIMER_THRD		0xB0

/**
 * struct mv_xor_v2_descriptor - DMA HW descriptor
 * @desc_id: used by S/W and is not affected by H/W.
 * @flags: error and status flags
 * @crc32_result: CRC32 calculation result
 * @desc_ctrl: operation mode and control flags
 * @buff_size: amount of bytes to be processed
 * @fill_pattern_src_addr: Fill-Pattern or Source-Address and
 * AW-Attributes
 * @data_buff_addr: Source (and might be RAID6 destination)
 * addresses of data buffers in RAID5 and RAID6
 * @reserved: reserved
 */
struct mv_xor_v2_descriptor {
	u16 desc_id;
	u16 flags;
	u32 crc32_result;
	u32 desc_ctrl;

	/* Definitions for desc_ctrl */
#define DESC_NUM_ACTIVE_D_BUF_SHIFT	22
#define DESC_OP_MODE_SHIFT		28
#define DESC_OP_MODE_NOP		0	/* Idle operation */
#define DESC_OP_MODE_MEMCPY		1	/* Pure-DMA operation */
#define DESC_OP_MODE_MEMSET		2	/* Mem-Fill operation */
#define DESC_OP_MODE_MEMINIT		3	/* Mem-Init operation */
#define DESC_OP_MODE_MEM_COMPARE	4	/* Mem-Compare operation */
#define DESC_OP_MODE_CRC32		5	/* CRC32 calculation */
#define DESC_OP_MODE_XOR		6	/* RAID5 (XOR) operation */
#define DESC_OP_MODE_RAID6		7	/* RAID6 P&Q-generation */
#define DESC_OP_MODE_RAID6_REC		8	/* RAID6 Recovery */
#define DESC_Q_BUFFER_ENABLE		BIT(16)
#define DESC_P_BUFFER_ENABLE		BIT(17)
#define DESC_IOD			BIT(27)

	u32 buff_size;
	u32 fill_pattern_src_addr[4];
	u32 data_buff_addr[MV_XOR_V2_DESC_BUFF_D_ADDR_SIZE];
	u32 reserved[MV_XOR_V2_DESC_RESERVED_SIZE];
};
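
/*
 * Layout sanity check (illustrative note, not from the original source):
 * the fields above add up to 16 + 16 + 48 + 48 = 128 bytes, matching
 * MV_XOR_V2_EXT_DESC_SIZE; probe() enforces this with a BUILD_BUG_ON().
 */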

/**
 * struct mv_xor_v2_device - implements a xor device
 * @lock: lock for the engine
 * @clk: reference to the 'core' clock
 * @reg_clk: reference to the 'reg' clock
 * @dma_base: memory mapped DMA register base
 * @glob_base: memory mapped global register base
 * @irq_tasklet: tasklet that handles descriptor completions
 * @free_sw_desc: linked list of free SW descriptors
 * @dmadev: dma device
 * @dmachan: dma channel
 * @hw_desq: HW descriptors queue
 * @hw_desq_virt: virtual address of DESCQ
 * @sw_desq: SW descriptors queue
 * @desc_size: HW descriptor size
 * @npendings: number of pending descriptors (for which tx_submit has
 * been called, but not yet issue_pending)
 * @hw_queue_idx: next position to use in the HW descriptors queue
 * @msi_desc: MSI descriptor of the allocated interrupt
 */
struct mv_xor_v2_device {
	spinlock_t lock;
	void __iomem *dma_base;
	void __iomem *glob_base;
	struct clk *clk;
	struct clk *reg_clk;
	struct tasklet_struct irq_tasklet;
	struct list_head free_sw_desc;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	dma_addr_t hw_desq;
	struct mv_xor_v2_descriptor *hw_desq_virt;
	struct mv_xor_v2_sw_desc *sw_desq;
	int desc_size;
	unsigned int npendings;
	unsigned int hw_queue_idx;
	struct msi_desc *msi_desc;
};

/**
 * struct mv_xor_v2_sw_desc - implements a xor SW descriptor
 * @idx: descriptor index
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @free_list: node of the free SW descriptors list
 */
struct mv_xor_v2_sw_desc {
	int idx;
	struct dma_async_tx_descriptor async_tx;
	struct mv_xor_v2_descriptor hw_desc;
	struct list_head free_list;
};

/*
 * Fill the data buffers to a HW descriptor
 */
static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
					struct mv_xor_v2_descriptor *desc,
					dma_addr_t src, int index)
{
	int arr_index = ((index >> 1) * 3);

	/*
	 * Fill the buffer's addresses to the descriptor.
	 *
	 * The format of the buffers address for 2 sequential buffers
	 * X and X + 1:
	 *
	 *  First word:  Buffer-DX-Address-Low[31:0]
	 *  Second word: Buffer-DX+1-Address-Low[31:0]
	 *  Third word:  DX+1-Buffer-Address-High[47:32] [31:16] and
	 *               DX-Buffer-Address-High[47:32] [15:0]
	 */
	if ((index & 0x1) == 0) {
		desc->data_buff_addr[arr_index] = lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF;
		desc->data_buff_addr[arr_index + 2] |=
			upper_32_bits(src) & 0xFFFF;
	} else {
		desc->data_buff_addr[arr_index + 1] =
			lower_32_bits(src);

		desc->data_buff_addr[arr_index + 2] &= ~0xFFFF0000;
		desc->data_buff_addr[arr_index + 2] |=
			(upper_32_bits(src) & 0xFFFF) << 16;
	}
}
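
/*
 * Illustrative example (not from the original source): for source buffer
 * index 1 with a 40-bit DMA address of 0x12_3456_7890, the code above stores
 * 0x34567890 in data_buff_addr[1] and ORs 0x0012 into bits [31:16] of
 * data_buff_addr[2]; buffer index 0 would instead use data_buff_addr[0] and
 * bits [15:0] of data_buff_addr[2].
 */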

/*
 * notify the engine of new descriptors, and update the available index.
 */
static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev,
				       int num_of_desc)
{
	/* write the number of new descriptors in the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ADD_OFF);
}

/*
 * free HW descriptors
 */
static void mv_xor_v2_free_desc_from_desq(struct mv_xor_v2_device *xor_dev,
					  int num_of_desc)
{
	/* write the number of descriptors to free from the DESQ. */
	writel(num_of_desc, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DEALLOC_OFF);
}

/*
 * Set descriptor size
 * Return the HW descriptor size in bytes
 */
static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
{
	writel(MV_XOR_V2_DMA_DESQ_CTRL_128B,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_CTRL_OFF);

	return MV_XOR_V2_EXT_DESC_SIZE;
}

/*
 * Set the IMSG threshold
 */
static inline
void mv_xor_v2_enable_imsg_thrd(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* Configure threshold of number of descriptors, and enable timer */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
	reg &= ~(MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= (MV_XOR_V2_DONE_IMSG_THRD << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
	reg |= MV_XOR_V2_DMA_IMSG_TIMER_EN;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);

	/* Configure Timer Threshold */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
	reg &= ~(MV_XOR_V2_DMA_IMSG_TIMER_THRD_MASK <<
		 MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	reg |= (MV_XOR_V2_TIMER_THRD << MV_XOR_V2_DMA_IMSG_TIMER_THRD_SHIFT);
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_TMOT);
}
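
/*
 * Note (added for clarity, not in the original source): with the values
 * above, completion interrupts are coalesced - the engine is expected to
 * raise an IMSG interrupt once roughly MV_XOR_V2_DONE_IMSG_THRD (0x14)
 * descriptors have completed, or once the enabled timer expires with fewer
 * completions outstanding, whichever comes first.
 */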

static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{
	struct mv_xor_v2_device *xor_dev = data;
	unsigned int ndescs;
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);
	ndescs = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		  MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);

	/* No descriptors to process */
	if (!ndescs)
		return IRQ_NONE;

	/* schedule a tasklet to handle descriptors callbacks */
	tasklet_schedule(&xor_dev->irq_tasklet);

	return IRQ_HANDLED;
}

/*
 * submit a descriptor to the DMA engine
 */
static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	void *dest_hw_desc;
	dma_cookie_t cookie;
	struct mv_xor_v2_sw_desc *sw_desc =
		container_of(tx, struct mv_xor_v2_sw_desc, async_tx);
	struct mv_xor_v2_device *xor_dev =
		container_of(tx->chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	/* lock enqueue DESCQ */
	spin_lock_bh(&xor_dev->lock);
	cookie = dma_cookie_assign(tx);

	/* copy the HW descriptor from the SW descriptor to the DESQ */
	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);

	xor_dev->npendings++;
	xor_dev->hw_queue_idx++;
	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
		xor_dev->hw_queue_idx = 0;

	/* unlock enqueue DESCQ */
	spin_unlock_bh(&xor_dev->lock);

	return cookie;
}
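
/*
 * Note (added for clarity, not in the original source): tx_submit() only
 * copies the HW descriptor into the coherent ring and bumps npendings;
 * the engine is not told about the new work until the client calls
 * dma_async_issue_pending(), which lands in mv_xor_v2_issue_pending()
 * below and writes the batched count to the DESQ "add" register.
 */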

/*
 * Prepare a SW descriptor
 */
static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	bool found = false;

	/* Lock the channel */
	spin_lock_bh(&xor_dev->lock);

	if (list_empty(&xor_dev->free_sw_desc)) {
		spin_unlock_bh(&xor_dev->lock);
		/* schedule tasklet to free some descriptors */
		tasklet_schedule(&xor_dev->irq_tasklet);
		return NULL;
	}

	list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
		if (async_tx_test_ack(&sw_desc->async_tx)) {
			found = true;
			break;
		}
	}

	if (!found) {
		spin_unlock_bh(&xor_dev->lock);
		return NULL;
	}

	list_del(&sw_desc->free_list);

	/* Release the channel */
	spin_unlock_bh(&xor_dev->lock);

	return sw_desc;
}

/*
 * Prepare a HW descriptor for a memcpy operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev;

	xor_dev = container_of(chan, struct mv_xor_v2_device, dmachan);

	dev_dbg(xor_dev->dmadev.dev,
		"%s len: %zu src %pad dest %pad flags: %ld\n",
		__func__, len, &src, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the MEMCPY control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_MEMCPY << DESC_OP_MODE_SHIFT;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set source address */
	hw_descriptor->fill_pattern_src_addr[0] = lower_32_bits(src);
	hw_descriptor->fill_pattern_src_addr[1] =
		upper_32_bits(src) & 0xFFFF;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
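
/*
 * Example (a rough sketch, not part of the original driver): once this
 * device is registered, a generic dmaengine client could drive the memcpy
 * path roughly as follows. "src_dma", "dst_dma" and "len" are placeholder
 * DMA-mapped addresses/length owned by the client.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -ENOMEM;
 *
 *	cookie = dmaengine_submit(tx);      // reaches mv_xor_v2_tx_submit()
 *	dma_async_issue_pending(chan);      // reaches mv_xor_v2_issue_pending()
 *	dma_sync_wait(chan, cookie);        // or use a completion callback
 *	dma_release_channel(chan);
 */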

/*
 * Prepare a HW descriptor for a XOR operation
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		       unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);
	int i;

	if (src_cnt > MV_XOR_V2_CMD_LINE_NUM_MAX_D_BUF || src_cnt < 1)
		return NULL;

	dev_dbg(xor_dev->dmadev.dev,
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	sw_desc->async_tx.flags = flags;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the XOR control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_P_BUFFER_ENABLE;

	if (flags & DMA_PREP_INTERRUPT)
		hw_descriptor->desc_ctrl |= DESC_IOD;

	/* Set the data buffers */
	for (i = 0; i < src_cnt; i++)
		mv_xor_v2_set_data_buffers(xor_dev, hw_descriptor, src[i], i);

	hw_descriptor->desc_ctrl |=
		src_cnt << DESC_NUM_ACTIVE_D_BUF_SHIFT;

	/* Set Destination address */
	hw_descriptor->fill_pattern_src_addr[2] = lower_32_bits(dest);
	hw_descriptor->fill_pattern_src_addr[3] =
		upper_32_bits(dest) & 0xFFFF;

	/* Set buffers size */
	hw_descriptor->buff_size = len;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}
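
/*
 * Worked example (illustrative, not from the original source): a 4-source
 * XOR prepared with DMA_PREP_INTERRUPT ends up with
 *   desc_ctrl = (DESC_OP_MODE_XOR << DESC_OP_MODE_SHIFT) |
 *               DESC_P_BUFFER_ENABLE | DESC_IOD |
 *               (4 << DESC_NUM_ACTIVE_D_BUF_SHIFT)
 *             = (6 << 28) | BIT(17) | BIT(27) | (4 << 22)
 *             = 0x69020000.
 */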

/*
 * Prepare a HW descriptor for interrupt operation.
 */
static struct dma_async_tx_descriptor *
mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_v2_sw_desc *sw_desc;
	struct mv_xor_v2_descriptor *hw_descriptor;
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
	if (!sw_desc)
		return NULL;

	/* set the HW descriptor */
	hw_descriptor = &sw_desc->hw_desc;

	/* save the SW descriptor ID to restore when operation is done */
	hw_descriptor->desc_id = sw_desc->idx;

	/* Set the INTERRUPT control word */
	hw_descriptor->desc_ctrl =
		DESC_OP_MODE_NOP << DESC_OP_MODE_SHIFT;
	hw_descriptor->desc_ctrl |= DESC_IOD;

	/* return the async tx descriptor */
	return &sw_desc->async_tx;
}

/*
 * push pending transactions to hardware
 */
static void mv_xor_v2_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_v2_device *xor_dev =
		container_of(chan, struct mv_xor_v2_device, dmachan);

	spin_lock_bh(&xor_dev->lock);

	/*
	 * update the engine with the number of descriptors to
	 * process
	 */
	mv_xor_v2_add_desc_to_desq(xor_dev, xor_dev->npendings);
	xor_dev->npendings = 0;

	spin_unlock_bh(&xor_dev->lock);
}

static inline
int mv_xor_v2_get_pending_params(struct mv_xor_v2_device *xor_dev,
				 int *pending_ptr)
{
	u32 reg;

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_DONE_OFF);

	/* get the next pending descriptor index */
	*pending_ptr = ((reg >> MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_SHIFT) &
			MV_XOR_V2_DMA_DESQ_DONE_READ_PTR_MASK);

	/* get the number of descriptors pending handling */
	return ((reg >> MV_XOR_V2_DMA_DESQ_DONE_PENDING_SHIFT) &
		MV_XOR_V2_DMA_DESQ_DONE_PENDING_MASK);
}

/*
 * handle the descriptors after HW process
 */
static void mv_xor_v2_tasklet(unsigned long data)
{
	struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
	int pending_ptr, num_of_pending, i;
	struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;

	dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);

	/* get the pending descriptors parameters */
	num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);

	/* loop over free descriptors */
	for (i = 0; i < num_of_pending; i++) {
		struct mv_xor_v2_descriptor *next_pending_hw_desc =
			xor_dev->hw_desq_virt + pending_ptr;

		/* get the SW descriptor related to the HW descriptor */
		next_pending_sw_desc =
			&xor_dev->sw_desq[next_pending_hw_desc->desc_id];

		/* call the callback */
		if (next_pending_sw_desc->async_tx.cookie > 0) {
			/*
			 * update the channel's completed cookie - no
			 * lock is required here; the IMSG threshold
			 * provides the needed serialization
			 */
			dma_cookie_complete(&next_pending_sw_desc->async_tx);

			dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
			dmaengine_desc_get_callback_invoke(
					&next_pending_sw_desc->async_tx, NULL);
		}

		dma_run_dependencies(&next_pending_sw_desc->async_tx);

		/* Lock the channel */
		spin_lock_bh(&xor_dev->lock);

		/* add the SW descriptor to the free descriptors list */
		list_add(&next_pending_sw_desc->free_list,
			 &xor_dev->free_sw_desc);

		/* Release the channel */
		spin_unlock_bh(&xor_dev->lock);

		/* increment the next descriptor */
		pending_ptr++;
		if (pending_ptr >= MV_XOR_V2_DESC_NUM)
			pending_ptr = 0;
	}

	/* free the processed descriptors in the DESQ */
	if (num_of_pending != 0)
		mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
}

/*
 * Set DMA Interrupt-message (IMSG) parameters
 */
static void mv_xor_v2_set_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct mv_xor_v2_device *xor_dev = dev_get_drvdata(desc->dev);

	writel(msg->address_lo,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BALR_OFF);
	writel(msg->address_hi & 0xFFFF,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_BAHR_OFF);
	writel(msg->data,
	       xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_CDAT_OFF);
}
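
/*
 * Background note (added, not in the original source): the address/data
 * pair programmed above comes from the platform MSI domain; when the
 * engine reaches the IMSG threshold it writes "data" to that address,
 * which the interrupt controller translates into the IRQ requested with
 * devm_request_irq() in probe().
 */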

static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
{
	u32 reg;

	/* write the DESQ size to the DMA engine */
	writel(MV_XOR_V2_DESC_NUM,
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);

	/* write the DESQ address to the DMA engine */
	writel(lower_32_bits(xor_dev->hw_desq),
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
	writel(upper_32_bits(xor_dev->hw_desq),
	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);

	/*
	 * This is a temporary solution, until we activate the
	 * SMMU. Set the attributes for reading & writing data buffers
	 * & descriptors to:
	 *
	 *  - OuterShareable - Snoops will be performed on CPU caches
	 *  - Enable cacheable - Bufferable, Modifiable, Other Allocate
	 *    and Allocate
	 */
	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ARATTR_OFF);

	reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);
	reg &= ~MV_XOR_V2_DMA_DESQ_ATTR_CACHE_MASK;
	reg |= MV_XOR_V2_DMA_DESQ_ATTR_OUTER_SHAREABLE |
		MV_XOR_V2_DMA_DESQ_ATTR_CACHEABLE;
	writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_AWATTR_OFF);

	/*
	 * BW CTRL - set values to optimize the XOR performance:
	 *
	 *  - Set WrBurstLen & RdBurstLen - the unit will issue
	 *    maximum of 256B write/read transactions.
	 *  - Limit the number of outstanding write & read data
	 *    (OBB/IBB) requests to the maximal value.
	 */
	reg = ((MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_RD_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_NUM_OSTD_WR_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_RD_BURST_LEN_SHIFT) |
	       (MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_VAL <<
		MV_XOR_V2_GLOB_BW_CTRL_WR_BURST_LEN_SHIFT));
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_BW_CTRL);

	/* Disable the AXI timer feature */
	reg = readl(xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
	reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
	writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);

	/* enable the DMA engine */
	writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_suspend(struct platform_device *dev, pm_message_t state)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	/* Set the stop bit to disable the XOR unit */
	writel(0x1, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);

	return 0;
}

static int mv_xor_v2_resume(struct platform_device *dev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(dev);

	mv_xor_v2_set_desc_size(xor_dev);
	mv_xor_v2_enable_imsg_thrd(xor_dev);
	mv_xor_v2_descq_init(xor_dev);

	return 0;
}

static int mv_xor_v2_probe(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev;
	struct resource *res;
	int i, ret = 0;
	struct dma_device *dma_dev;
	struct mv_xor_v2_sw_desc *sw_desc;
	struct msi_desc *msi_desc;

	BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
		     MV_XOR_V2_EXT_DESC_SIZE);

	xor_dev = devm_kzalloc(&pdev->dev, sizeof(*xor_dev), GFP_KERNEL);
	if (!xor_dev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->dma_base))
		return PTR_ERR(xor_dev->dma_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xor_dev->glob_base))
		return PTR_ERR(xor_dev->glob_base);

	platform_set_drvdata(pdev, xor_dev);

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	xor_dev->reg_clk = devm_clk_get(&pdev->dev, "reg");
	if (PTR_ERR(xor_dev->reg_clk) != -ENOENT) {
		if (!IS_ERR(xor_dev->reg_clk)) {
			ret = clk_prepare_enable(xor_dev->reg_clk);
			if (ret)
				return ret;
		} else {
			return PTR_ERR(xor_dev->reg_clk);
		}
	}

	xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto disable_reg_clk;
	}
	if (!IS_ERR(xor_dev->clk)) {
		ret = clk_prepare_enable(xor_dev->clk);
		if (ret)
			goto disable_reg_clk;
	}

	ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
					     mv_xor_v2_set_msi_msg);
	if (ret)
		goto disable_clk;

	msi_desc = first_msi_entry(&pdev->dev);
	if (!msi_desc)
		goto free_msi_irqs;
	xor_dev->msi_desc = msi_desc;

	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
			       mv_xor_v2_interrupt_handler, 0,
			       dev_name(&pdev->dev), xor_dev);
	if (ret)
		goto free_msi_irqs;

	tasklet_init(&xor_dev->irq_tasklet, mv_xor_v2_tasklet,
		     (unsigned long) xor_dev);

	xor_dev->desc_size = mv_xor_v2_set_desc_size(xor_dev);

	dma_cookie_init(&xor_dev->dmachan);

	/*
	 * allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	xor_dev->hw_desq_virt =
		dma_alloc_coherent(&pdev->dev,
				   xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
				   &xor_dev->hw_desq, GFP_KERNEL);
	if (!xor_dev->hw_desq_virt) {
		ret = -ENOMEM;
		goto free_msi_irqs;
	}

	/* alloc memory for the SW descriptors */
	xor_dev->sw_desq = devm_kcalloc(&pdev->dev,
					MV_XOR_V2_DESC_NUM, sizeof(*sw_desc),
					GFP_KERNEL);
	if (!xor_dev->sw_desq) {
		ret = -ENOMEM;
		goto free_hw_desq;
	}

	spin_lock_init(&xor_dev->lock);

	/* init the free SW descriptors list */
	INIT_LIST_HEAD(&xor_dev->free_sw_desc);

	/* add all SW descriptors to the free list */
	for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
		struct mv_xor_v2_sw_desc *sw_desc =
			xor_dev->sw_desq + i;
		sw_desc->idx = i;
		dma_async_tx_descriptor_init(&sw_desc->async_tx,
					     &xor_dev->dmachan);
		sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
		async_tx_ack(&sw_desc->async_tx);

		list_add(&sw_desc->free_list,
			 &xor_dev->free_sw_desc);
	}

	dma_dev = &xor_dev->dmadev;

	/* set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* init dma link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = mv_xor_v2_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->device_prep_dma_memcpy = mv_xor_v2_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = mv_xor_v2_prep_dma_interrupt;
	dma_dev->max_xor = 8;
	dma_dev->device_prep_dma_xor = mv_xor_v2_prep_dma_xor;

	xor_dev->dmachan.device = dma_dev;

	list_add_tail(&xor_dev->dmachan.device_node,
		      &dma_dev->channels);

	mv_xor_v2_enable_imsg_thrd(xor_dev);

	mv_xor_v2_descq_init(xor_dev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto free_hw_desq;

	dev_notice(&pdev->dev, "Marvell Version 2 XOR driver\n");

	return 0;

free_hw_desq:
	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
free_msi_irqs:
	platform_msi_domain_free_irqs(&pdev->dev);
disable_clk:
	clk_disable_unprepare(xor_dev->clk);
disable_reg_clk:
	clk_disable_unprepare(xor_dev->reg_clk);

	return ret;
}

static int mv_xor_v2_remove(struct platform_device *pdev)
{
	struct mv_xor_v2_device *xor_dev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&xor_dev->dmadev);

	dma_free_coherent(&pdev->dev,
			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
			  xor_dev->hw_desq_virt, xor_dev->hw_desq);

	devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);

	platform_msi_domain_free_irqs(&pdev->dev);

	tasklet_kill(&xor_dev->irq_tasklet);

	clk_disable_unprepare(xor_dev->clk);

	return 0;
}

static const struct of_device_id mv_xor_v2_dt_ids[] = {
	{ .compatible = "marvell,xor-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_v2_dt_ids);
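
/*
 * Example device-tree node (a sketch based on the "marvell,xor-v2"
 * compatible above; the unit address, register ranges and the msi-parent
 * phandle are placeholders and depend on the SoC - an optional "clocks"
 * property may also be present):
 *
 *	xor0@400000 {
 *		compatible = "marvell,xor-v2";
 *		reg = <0x400000 0x1000>,
 *		      <0x410000 0x1000>;
 *		msi-parent = <&gic_v2m0>;
 *		dma-coherent;
 *	};
 */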

static struct platform_driver mv_xor_v2_driver = {
	.probe		= mv_xor_v2_probe,
	.suspend	= mv_xor_v2_suspend,
	.resume		= mv_xor_v2_resume,
	.remove		= mv_xor_v2_remove,
	.driver		= {
		.name	= "mv_xor_v2",
		.of_match_table = of_match_ptr(mv_xor_v2_dt_ids),
	},
};

module_platform_driver(mv_xor_v2_driver);

MODULE_DESCRIPTION("DMA engine driver for Marvell's Version 2 of XOR engine");
MODULE_LICENSE("GPL");