1 // SPDX-License-Identifier: GPL-2.0-only
* offload engine driver for the Intel Xscale series of I/O processors
* Copyright © 2006, Intel Corporation.
* This driver supports the asynchronous DMA copy and RAID engines available
* on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/platform_device.h>
19 #include <linux/memory.h>
20 #include <linux/ioport.h>
21 #include <linux/raid/pq.h>
22 #include <linux/slab.h>
24 #include <mach/adma.h>
26 #include "dmaengine.h"
28 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
29 #define to_iop_adma_device(dev) \
30 container_of(dev, struct iop_adma_device, common)
31 #define tx_to_iop_adma_slot(tx) \
32 container_of(tx, struct iop_adma_desc_slot, async_tx)
35 * iop_adma_free_slots - flags descriptor slots for reuse
37 * Caller must hold &iop_chan->lock while calling this function
39 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
41 int stride = slot->slots_per_op;
44 slot->slots_per_op = 0;
45 slot = list_entry(slot->slot_node.next,
46 struct iop_adma_desc_slot,
52 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
53 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
55 struct dma_async_tx_descriptor *tx = &desc->async_tx;
57 BUG_ON(tx->cookie < 0);
62 /* call the callback (must not sleep or submit new
63 * operations to this channel)
65 dmaengine_desc_get_callback_invoke(tx, NULL);
67 dma_descriptor_unmap(tx);
69 desc->group_head = NULL;
72 /* run dependent operations */
73 dma_run_dependencies(tx);
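/*
 * A minimal sketch (hypothetical client code) of a callback that obeys
 * the constraint above: it only signals a waiter, so it neither sleeps
 * nor submits new descriptors to this channel:
 *
 *	static void example_done(void *arg)
 *	{
 *		complete((struct completion *)arg);
 *	}
 */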
79 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
80 struct iop_adma_chan *iop_chan)
82 /* the client is allowed to attach dependent operations
85 if (!async_tx_test_ack(&desc->async_tx))
88 /* leave the last descriptor in the chain
89 * so we can append to it
91 if (desc->chain_node.next == &iop_chan->chain)
94 dev_dbg(iop_chan->device->common.dev,
95 "\tfree slot: %d slots_per_op: %d\n",
96 desc->idx, desc->slots_per_op);
98 list_del(&desc->chain_node);
99 iop_adma_free_slots(desc);
104 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
106 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
107 dma_cookie_t cookie = 0;
108 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
109 int busy = iop_chan_is_busy(iop_chan);
110 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
112 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
113 /* free completed slots from the chain starting with
114 * the oldest descriptor
116 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
118 pr_debug("\tcookie: %d slot: %d busy: %d "
119 "this_desc: %#x next_desc: %#x ack: %d\n",
120 iter->async_tx.cookie, iter->idx, busy,
121 iter->async_tx.phys, iop_desc_get_next_desc(iter),
122 async_tx_test_ack(&iter->async_tx));
124 prefetch(&_iter->async_tx);
126 /* do not advance past the current descriptor loaded into the
127 * hardware channel, subsequent descriptors are either in
128 * process or have not been submitted
133 /* stop the search if we reach the current descriptor and the
134 * channel is busy, or if it appears that the current descriptor
135 * needs to be re-read (i.e. has been appended to)
137 if (iter->async_tx.phys == current_desc) {
138 BUG_ON(seen_current++);
139 if (busy || iop_desc_get_next_desc(iter))
143 /* detect the start of a group transaction */
144 if (!slot_cnt && !slots_per_op) {
145 slot_cnt = iter->slot_cnt;
146 slots_per_op = iter->slots_per_op;
147 if (slot_cnt <= slots_per_op) {
154 pr_debug("\tgroup++\n");
157 slot_cnt -= slots_per_op;
160 /* all the members of a group are complete */
161 if (slots_per_op != 0 && slot_cnt == 0) {
162 struct iop_adma_desc_slot *grp_iter, *_grp_iter;
163 int end_of_chain = 0;
164 pr_debug("\tgroup end\n");
166 /* collect the total results */
167 if (grp_start->xor_check_result) {
168 u32 zero_sum_result = 0;
169 slot_cnt = grp_start->slot_cnt;
170 grp_iter = grp_start;
172 list_for_each_entry_from(grp_iter,
173 &iop_chan->chain, chain_node) {
175 iop_desc_get_zero_result(grp_iter);
176 pr_debug("\titer%d result: %d\n",
177 grp_iter->idx, zero_sum_result);
178 slot_cnt -= slots_per_op;
182 pr_debug("\tgrp_start->xor_check_result: %p\n",
183 grp_start->xor_check_result);
184 *grp_start->xor_check_result = zero_sum_result;
187 /* clean up the group */
188 slot_cnt = grp_start->slot_cnt;
189 grp_iter = grp_start;
190 list_for_each_entry_safe_from(grp_iter, _grp_iter,
191 &iop_chan->chain, chain_node) {
192 cookie = iop_adma_run_tx_complete_actions(
193 grp_iter, iop_chan, cookie);
195 slot_cnt -= slots_per_op;
196 end_of_chain = iop_adma_clean_slot(grp_iter,
199 if (slot_cnt == 0 || end_of_chain)
203 /* the group should be complete at this point */
212 } else if (slots_per_op) /* wait for group completion */
215 /* write back zero sum results (single descriptor case) */
216 if (iter->xor_check_result && iter->async_tx.cookie)
217 *iter->xor_check_result =
218 iop_desc_get_zero_result(iter);
220 cookie = iop_adma_run_tx_complete_actions(
221 iter, iop_chan, cookie);
223 if (iop_adma_clean_slot(iter, iop_chan))
228 iop_chan->common.completed_cookie = cookie;
229 pr_debug("\tcompleted cookie %d\n", cookie);
234 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
236 spin_lock_bh(&iop_chan->lock);
237 __iop_adma_slot_cleanup(iop_chan);
238 spin_unlock_bh(&iop_chan->lock);
241 static void iop_adma_tasklet(unsigned long data)
243 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
/* lockdep will flag dependency submissions as potentially
 * recursive locking, this is not the case as a dependency
 * submission will never recurse into a channel's submit routine.
248 * There are checks in async_tx.c to prevent this.
250 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
251 __iop_adma_slot_cleanup(iop_chan);
252 spin_unlock(&iop_chan->lock);
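/*
 * Illustrative scenario for the annotation above: cleanup on channel A
 * runs dma_run_dependencies(), which may invoke the submit routine of a
 * different channel B.  A->lock and B->lock share a lock class, so
 * without SINGLE_DEPTH_NESTING lockdep would report a false recursion,
 * even though async_tx guarantees the dependency never targets A itself.
 */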
255 static struct iop_adma_desc_slot *
256 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
259 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
261 int slots_found, retry = 0;
/* start search from the last allocated descriptor
 * if a contiguous allocation cannot be found start searching
 * from the beginning of the list
270 iter = iop_chan->last_used;
272 iter = list_entry(&iop_chan->all_slots,
273 struct iop_adma_desc_slot,
276 list_for_each_entry_safe_continue(
277 iter, _iter, &iop_chan->all_slots, slot_node) {
279 prefetch(&_iter->async_tx);
280 if (iter->slots_per_op) {
281 /* give up after finding the first busy slot
282 * on the second pass through the list
291 /* start the allocation if the slot is correctly aligned */
292 if (!slots_found++) {
293 if (iop_desc_is_aligned(iter, slots_per_op))
301 if (slots_found == num_slots) {
302 struct iop_adma_desc_slot *alloc_tail = NULL;
303 struct iop_adma_desc_slot *last_used = NULL;
307 dev_dbg(iop_chan->device->common.dev,
308 "allocated slot: %d "
309 "(desc %p phys: %#x) slots_per_op %d\n",
310 iter->idx, iter->hw_desc,
311 iter->async_tx.phys, slots_per_op);
313 /* pre-ack all but the last descriptor */
314 if (num_slots != slots_per_op)
315 async_tx_ack(&iter->async_tx);
317 list_add_tail(&iter->chain_node, &chain);
319 iter->async_tx.cookie = 0;
320 iter->slot_cnt = num_slots;
321 iter->xor_check_result = NULL;
322 for (i = 0; i < slots_per_op; i++) {
323 iter->slots_per_op = slots_per_op - i;
325 iter = list_entry(iter->slot_node.next,
326 struct iop_adma_desc_slot,
329 num_slots -= slots_per_op;
331 alloc_tail->group_head = alloc_start;
332 alloc_tail->async_tx.cookie = -EBUSY;
333 list_splice(&chain, &alloc_tail->tx_list);
334 iop_chan->last_used = last_used;
335 iop_desc_clear_next_desc(alloc_start);
336 iop_desc_clear_next_desc(alloc_tail);
343 /* perform direct reclaim if the allocation fails */
344 __iop_adma_slot_cleanup(iop_chan);
349 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
351 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
354 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
355 iop_chan->pending = 0;
356 iop_chan_append(iop_chan);
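/*
 * Worked example (illustrative threshold): if IOP_ADMA_THRESHOLD were 4,
 * four 1-slot memcpy submissions would batch into a single append here,
 * while one multi-slot xor could cross the threshold on its own, since
 * 'pending' counts slots rather than operations.
 */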
361 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
363 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
364 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
365 struct iop_adma_desc_slot *grp_start, *old_chain_tail;
371 grp_start = sw_desc->group_head;
372 slot_cnt = grp_start->slot_cnt;
373 slots_per_op = grp_start->slots_per_op;
375 spin_lock_bh(&iop_chan->lock);
376 cookie = dma_cookie_assign(tx);
378 old_chain_tail = list_entry(iop_chan->chain.prev,
379 struct iop_adma_desc_slot, chain_node);
380 list_splice_init(&sw_desc->tx_list,
381 &old_chain_tail->chain_node);
383 /* fix up the hardware chain */
384 next_dma = grp_start->async_tx.phys;
385 iop_desc_set_next_desc(old_chain_tail, next_dma);
386 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
388 /* check for pre-chained descriptors */
389 iop_paranoia(iop_desc_get_next_desc(sw_desc));
/* increment the pending count by the number of slots;
 * memcpy operations have a 1:1 (slot:operation) relation,
 * other operations are heavier and will pop the threshold
396 iop_chan->pending += slot_cnt;
397 iop_adma_check_threshold(iop_chan);
398 spin_unlock_bh(&iop_chan->lock);
400 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
401 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
406 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
407 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
* iop_adma_alloc_chan_resources - returns the number of allocated descriptors
* @chan: allocate descriptor resources for this channel
*
* Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
* avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
* greater than 2x the number of slots needed to satisfy a device->max_xor
419 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
423 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
424 struct iop_adma_desc_slot *slot = NULL;
425 int init = iop_chan->slots_allocated ? 0 : 1;
426 struct iop_adma_platform_data *plat_data =
427 dev_get_platdata(&iop_chan->device->pdev->dev);
428 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
430 /* Allocate descriptor slots */
432 idx = iop_chan->slots_allocated;
433 if (idx == num_descs_in_pool)
436 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
printk(KERN_INFO "IOP ADMA Channel only initialized"
" %d descriptor slots\n", idx);
442 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
443 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
445 dma_async_tx_descriptor_init(&slot->async_tx, chan);
446 slot->async_tx.tx_submit = iop_adma_tx_submit;
447 INIT_LIST_HEAD(&slot->tx_list);
448 INIT_LIST_HEAD(&slot->chain_node);
449 INIT_LIST_HEAD(&slot->slot_node);
450 hw_desc = (char *) iop_chan->device->dma_desc_pool;
451 slot->async_tx.phys =
452 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
455 spin_lock_bh(&iop_chan->lock);
456 iop_chan->slots_allocated++;
457 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
458 spin_unlock_bh(&iop_chan->lock);
459 } while (iop_chan->slots_allocated < num_descs_in_pool);
461 if (idx && !iop_chan->last_used)
462 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
463 struct iop_adma_desc_slot,
466 dev_dbg(iop_chan->device->common.dev,
467 "allocated %d descriptor slots last_used: %p\n",
468 iop_chan->slots_allocated, iop_chan->last_used);
470 /* initialize the channel and the chain with a null operation */
472 if (dma_has_cap(DMA_MEMCPY,
473 iop_chan->device->common.cap_mask))
474 iop_chan_start_null_memcpy(iop_chan);
475 else if (dma_has_cap(DMA_XOR,
476 iop_chan->device->common.cap_mask))
477 iop_chan_start_null_xor(iop_chan);
482 return (idx > 0) ? idx : -ENOMEM;
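/*
 * Worked example for the sizing note above (illustrative numbers): with
 * an IOP_ADMA_SLOT_SIZE of 32 bytes and a platform pool_size of one 4K
 * page, num_descs_in_pool is 4096 / 32 = 128.  If a device->max_xor
 * operation needs N slots, the pool must supply more than 2 * N slots in
 * addition to the one operation permanently parked on iop_chan->chain.
 */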
485 static struct dma_async_tx_descriptor *
486 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
488 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
489 struct iop_adma_desc_slot *sw_desc, *grp_start;
490 int slot_cnt, slots_per_op;
492 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
494 spin_lock_bh(&iop_chan->lock);
495 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
496 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
498 grp_start = sw_desc->group_head;
499 iop_desc_init_interrupt(grp_start, iop_chan);
500 sw_desc->async_tx.flags = flags;
502 spin_unlock_bh(&iop_chan->lock);
504 return sw_desc ? &sw_desc->async_tx : NULL;
507 static struct dma_async_tx_descriptor *
508 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
509 dma_addr_t dma_src, size_t len, unsigned long flags)
511 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
512 struct iop_adma_desc_slot *sw_desc, *grp_start;
513 int slot_cnt, slots_per_op;
517 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
522 spin_lock_bh(&iop_chan->lock);
523 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
524 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
526 grp_start = sw_desc->group_head;
527 iop_desc_init_memcpy(grp_start, flags);
528 iop_desc_set_byte_count(grp_start, iop_chan, len);
529 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
530 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
531 sw_desc->async_tx.flags = flags;
533 spin_unlock_bh(&iop_chan->lock);
535 return sw_desc ? &sw_desc->async_tx : NULL;
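/*
 * A minimal sketch of the client-side flow for this prep routine,
 * assuming src/dest were already mapped with dma_map_single() (this
 * mirrors the memcpy self-test further down):
 *
 *	tx = iop_adma_prep_dma_memcpy(chan, dest_dma, src_dma, len,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = iop_adma_tx_submit(tx);
 *		iop_adma_issue_pending(chan);
 *	}
 */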
538 static struct dma_async_tx_descriptor *
539 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
540 dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
543 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
544 struct iop_adma_desc_slot *sw_desc, *grp_start;
545 int slot_cnt, slots_per_op;
549 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
551 dev_dbg(iop_chan->device->common.dev,
552 "%s src_cnt: %d len: %u flags: %lx\n",
553 __func__, src_cnt, len, flags);
555 spin_lock_bh(&iop_chan->lock);
556 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
557 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
559 grp_start = sw_desc->group_head;
560 iop_desc_init_xor(grp_start, src_cnt, flags);
561 iop_desc_set_byte_count(grp_start, iop_chan, len);
562 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
563 sw_desc->async_tx.flags = flags;
565 iop_desc_set_xor_src_addr(grp_start, src_cnt,
568 spin_unlock_bh(&iop_chan->lock);
570 return sw_desc ? &sw_desc->async_tx : NULL;
573 static struct dma_async_tx_descriptor *
574 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
575 unsigned int src_cnt, size_t len, u32 *result,
578 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
579 struct iop_adma_desc_slot *sw_desc, *grp_start;
580 int slot_cnt, slots_per_op;
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
586 __func__, src_cnt, len);
588 spin_lock_bh(&iop_chan->lock);
589 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
590 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
592 grp_start = sw_desc->group_head;
593 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
594 iop_desc_set_zero_sum_byte_count(grp_start, len);
595 grp_start->xor_check_result = result;
596 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
597 __func__, grp_start->xor_check_result);
598 sw_desc->async_tx.flags = flags;
600 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
603 spin_unlock_bh(&iop_chan->lock);
605 return sw_desc ? &sw_desc->async_tx : NULL;
608 static struct dma_async_tx_descriptor *
609 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
610 unsigned int src_cnt, const unsigned char *scf, size_t len,
613 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
614 struct iop_adma_desc_slot *sw_desc, *g;
615 int slot_cnt, slots_per_op;
620 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
622 dev_dbg(iop_chan->device->common.dev,
623 "%s src_cnt: %d len: %u flags: %lx\n",
624 __func__, src_cnt, len, flags);
626 if (dmaf_p_disabled_continue(flags))
627 continue_srcs = 1+src_cnt;
628 else if (dmaf_continue(flags))
629 continue_srcs = 3+src_cnt;
631 continue_srcs = 0+src_cnt;
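/*
 * Worked example for the accounting above (illustrative): continuing a
 * 5-source P+Q operation with DMA_PREP_CONTINUE re-reads the old P once
 * and the old Q twice (the three extra iop_desc_set_pq_src_addr() calls
 * below), so the slot count is computed for 8 sources; with P disabled
 * only the old Q is re-read, giving 6.
 */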
633 spin_lock_bh(&iop_chan->lock);
634 slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
635 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
639 g = sw_desc->group_head;
640 iop_desc_set_byte_count(g, iop_chan, len);
642 /* even if P is disabled its destination address (bits
643 * [3:0]) must match Q. It is ok if P points to an
644 * invalid address, it won't be written.
646 if (flags & DMA_PREP_PQ_DISABLE_P)
647 dst[0] = dst[1] & 0x7;
649 iop_desc_set_pq_addr(g, dst);
650 sw_desc->async_tx.flags = flags;
651 for (i = 0; i < src_cnt; i++)
652 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
/* if we are continuing a previous operation, factor in
 * the old p and q values; see the comment for dma_maxpq
 * in include/linux/dmaengine.h
658 if (dmaf_p_disabled_continue(flags))
659 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
660 else if (dmaf_continue(flags)) {
661 iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
662 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
663 iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
665 iop_desc_init_pq(g, i, flags);
667 spin_unlock_bh(&iop_chan->lock);
669 return sw_desc ? &sw_desc->async_tx : NULL;
672 static struct dma_async_tx_descriptor *
673 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
674 unsigned int src_cnt, const unsigned char *scf,
675 size_t len, enum sum_check_flags *pqres,
678 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
679 struct iop_adma_desc_slot *sw_desc, *g;
680 int slot_cnt, slots_per_op;
684 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
687 __func__, src_cnt, len);
689 spin_lock_bh(&iop_chan->lock);
690 slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
691 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
693 /* for validate operations p and q are tagged onto the
694 * end of the source list
696 int pq_idx = src_cnt;
698 g = sw_desc->group_head;
699 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
700 iop_desc_set_pq_zero_sum_byte_count(g, len);
701 g->pq_check_result = pqres;
702 pr_debug("\t%s: g->pq_check_result: %p\n",
703 __func__, g->pq_check_result);
704 sw_desc->async_tx.flags = flags;
706 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
709 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
711 spin_unlock_bh(&iop_chan->lock);
713 return sw_desc ? &sw_desc->async_tx : NULL;
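/*
 * Illustrative call layout (mirrors the pq self-test below): with n
 * sources the caller appends P and Q to the same dma_addr_t array and
 * passes the tail as the pq argument:
 *
 *	iop_adma_prep_dma_pq_val(chan, &src[n], src, n, scf,
 *				 len, &result, flags);
 */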
716 static void iop_adma_free_chan_resources(struct dma_chan *chan)
718 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
719 struct iop_adma_desc_slot *iter, *_iter;
720 int in_use_descs = 0;
722 iop_adma_slot_cleanup(iop_chan);
724 spin_lock_bh(&iop_chan->lock);
725 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
728 list_del(&iter->chain_node);
730 list_for_each_entry_safe_reverse(
731 iter, _iter, &iop_chan->all_slots, slot_node) {
732 list_del(&iter->slot_node);
734 iop_chan->slots_allocated--;
736 iop_chan->last_used = NULL;
738 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
739 __func__, iop_chan->slots_allocated);
740 spin_unlock_bh(&iop_chan->lock);
/* one is ok since we left it there on purpose */
743 if (in_use_descs > 1)
744 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
749 * iop_adma_status - poll the status of an ADMA transaction
750 * @chan: ADMA channel handle
751 * @cookie: ADMA transaction identifier
752 * @txstate: a holder for the current state of the channel or NULL
754 static enum dma_status iop_adma_status(struct dma_chan *chan,
756 struct dma_tx_state *txstate)
758 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
761 ret = dma_cookie_status(chan, cookie, txstate);
762 if (ret == DMA_COMPLETE)
765 iop_adma_slot_cleanup(iop_chan);
767 return dma_cookie_status(chan, cookie, txstate);
770 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
772 struct iop_adma_chan *chan = data;
774 dev_dbg(chan->device->common.dev, "%s\n", __func__);
776 tasklet_schedule(&chan->irq_tasklet);
778 iop_adma_device_clear_eot_status(chan);
783 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
785 struct iop_adma_chan *chan = data;
787 dev_dbg(chan->device->common.dev, "%s\n", __func__);
789 tasklet_schedule(&chan->irq_tasklet);
791 iop_adma_device_clear_eoc_status(chan);
796 static irqreturn_t iop_adma_err_handler(int irq, void *data)
798 struct iop_adma_chan *chan = data;
799 unsigned long status = iop_chan_get_status(chan);
801 dev_err(chan->device->common.dev,
802 "error ( %s%s%s%s%s%s%s)\n",
803 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
804 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
805 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
806 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
807 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
808 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
809 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
811 iop_adma_device_clear_err_status(chan);
818 static void iop_adma_issue_pending(struct dma_chan *chan)
820 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
822 if (iop_chan->pending) {
823 iop_chan->pending = 0;
824 iop_chan_append(iop_chan);
829 * Perform a transaction to verify the HW works.
831 #define IOP_ADMA_TEST_SIZE 2000
833 static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
837 dma_addr_t src_dma, dest_dma;
838 struct dma_chan *dma_chan;
840 struct dma_async_tx_descriptor *tx;
842 struct iop_adma_chan *iop_chan;
844 dev_dbg(device->common.dev, "%s\n", __func__);
846 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
849 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
855 /* Fill in src buffer */
856 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
857 ((u8 *) src)[i] = (u8)i;
859 /* Start copy, using first DMA channel */
860 dma_chan = container_of(device->common.channels.next,
863 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
868 dest_dma = dma_map_single(dma_chan->device->dev, dest,
869 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
870 src_dma = dma_map_single(dma_chan->device->dev, src,
871 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
872 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
874 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
876 cookie = iop_adma_tx_submit(tx);
877 iop_adma_issue_pending(dma_chan);
880 if (iop_adma_status(dma_chan, cookie, NULL) !=
882 dev_err(dma_chan->device->dev,
883 "Self-test copy timed out, disabling\n");
888 iop_chan = to_iop_adma_chan(dma_chan);
889 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
890 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
891 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
892 dev_err(dma_chan->device->dev,
893 "Self-test copy failed compare, disabling\n");
899 iop_adma_free_chan_resources(dma_chan);
906 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
908 iop_adma_xor_val_self_test(struct iop_adma_device *device)
912 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
913 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
914 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
916 struct dma_async_tx_descriptor *tx;
917 struct dma_chan *dma_chan;
923 struct iop_adma_chan *iop_chan;
925 dev_dbg(device->common.dev, "%s\n", __func__);
927 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
928 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
929 if (!xor_srcs[src_idx]) {
931 __free_page(xor_srcs[src_idx]);
936 dest = alloc_page(GFP_KERNEL);
939 __free_page(xor_srcs[src_idx]);
943 /* Fill in src buffers */
944 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
945 u8 *ptr = page_address(xor_srcs[src_idx]);
946 for (i = 0; i < PAGE_SIZE; i++)
947 ptr[i] = (1 << src_idx);
950 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
951 cmp_byte ^= (u8) (1 << src_idx);
953 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
954 (cmp_byte << 8) | cmp_byte;
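/* with IOP_ADMA_NUM_SRC_TEST == 4 this works out to cmp_byte == 0x0f
 * and cmp_word == 0x0f0f0f0f
 */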
956 memset(page_address(dest), 0, PAGE_SIZE);
958 dma_chan = container_of(device->common.channels.next,
961 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
967 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
968 PAGE_SIZE, DMA_FROM_DEVICE);
969 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
970 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
971 0, PAGE_SIZE, DMA_TO_DEVICE);
972 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
973 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
974 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
976 cookie = iop_adma_tx_submit(tx);
977 iop_adma_issue_pending(dma_chan);
980 if (iop_adma_status(dma_chan, cookie, NULL) !=
982 dev_err(dma_chan->device->dev,
983 "Self-test xor timed out, disabling\n");
988 iop_chan = to_iop_adma_chan(dma_chan);
989 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
990 PAGE_SIZE, DMA_FROM_DEVICE);
991 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
992 u32 *ptr = page_address(dest);
993 if (ptr[i] != cmp_word) {
994 dev_err(dma_chan->device->dev,
995 "Self-test xor failed compare, disabling\n");
1000 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1001 PAGE_SIZE, DMA_TO_DEVICE);
1003 /* skip zero sum if the capability is not present */
1004 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1005 goto free_resources;
/* zero sum the sources with the destination page */
1008 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1009 zero_sum_srcs[i] = xor_srcs[i];
1010 zero_sum_srcs[i] = dest;
1012 zero_sum_result = 1;
1014 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1015 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1016 zero_sum_srcs[i], 0, PAGE_SIZE,
1018 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1019 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1021 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1023 cookie = iop_adma_tx_submit(tx);
1024 iop_adma_issue_pending(dma_chan);
1027 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1028 dev_err(dma_chan->device->dev,
1029 "Self-test zero sum timed out, disabling\n");
1031 goto free_resources;
1034 if (zero_sum_result != 0) {
1035 dev_err(dma_chan->device->dev,
1036 "Self-test zero sum failed compare, disabling\n");
1038 goto free_resources;
1041 /* test for non-zero parity sum */
1042 zero_sum_result = 0;
1043 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1044 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1045 zero_sum_srcs[i], 0, PAGE_SIZE,
1047 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1048 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1050 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1052 cookie = iop_adma_tx_submit(tx);
1053 iop_adma_issue_pending(dma_chan);
1056 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1057 dev_err(dma_chan->device->dev,
1058 "Self-test non-zero sum timed out, disabling\n");
1060 goto free_resources;
1063 if (zero_sum_result != 1) {
1064 dev_err(dma_chan->device->dev,
1065 "Self-test non-zero sum failed compare, disabling\n");
1067 goto free_resources;
1071 iop_adma_free_chan_resources(dma_chan);
1073 src_idx = IOP_ADMA_NUM_SRC_TEST;
1075 __free_page(xor_srcs[src_idx]);
1080 #ifdef CONFIG_RAID6_PQ
1082 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1084 /* combined sources, software pq results, and extra hw pq results */
1085 struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1086 /* ptr to the extra hw pq buffers defined above */
1087 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1088 /* address conversion buffers (dma_map / page_address) */
1089 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1090 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1091 dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1094 struct dma_async_tx_descriptor *tx;
1095 struct dma_chan *dma_chan;
1096 dma_cookie_t cookie;
1097 u32 zero_sum_result;
1101 dev_dbg(device->common.dev, "%s\n", __func__);
1103 for (i = 0; i < ARRAY_SIZE(pq); i++) {
1104 pq[i] = alloc_page(GFP_KERNEL);
1112 /* Fill in src buffers */
1113 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1114 pq_sw[i] = page_address(pq[i]);
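/* memset() truncates its value to a byte, so this stores 0x11 << i */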
1115 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1117 pq_sw[i] = page_address(pq[i]);
1118 pq_sw[i+1] = page_address(pq[i+1]);
1120 dma_chan = container_of(device->common.channels.next,
1123 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1128 dev = dma_chan->device->dev;
1130 /* initialize the dests */
memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
memset(page_address(pq_hw[1]), 0, PAGE_SIZE);
1135 pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1136 pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1137 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1138 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1141 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1142 IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1144 DMA_PREP_INTERRUPT |
1147 cookie = iop_adma_tx_submit(tx);
1148 iop_adma_issue_pending(dma_chan);
1151 if (iop_adma_status(dma_chan, cookie, NULL) !=
1153 dev_err(dev, "Self-test pq timed out, disabling\n");
1155 goto free_resources;
1158 raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1160 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1161 page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1162 dev_err(dev, "Self-test p failed compare, disabling\n");
1164 goto free_resources;
1166 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1167 page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1168 dev_err(dev, "Self-test q failed compare, disabling\n");
1170 goto free_resources;
1173 /* test correct zero sum using the software generated pq values */
1174 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1175 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1178 zero_sum_result = ~0;
1179 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1180 pq_src, IOP_ADMA_NUM_SRC_TEST,
1181 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1182 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1184 cookie = iop_adma_tx_submit(tx);
1185 iop_adma_issue_pending(dma_chan);
1188 if (iop_adma_status(dma_chan, cookie, NULL) !=
1190 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1192 goto free_resources;
1195 if (zero_sum_result != 0) {
1196 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1199 goto free_resources;
1202 /* test incorrect zero sum */
1203 i = IOP_ADMA_NUM_SRC_TEST;
1204 memset(pq_sw[i] + 100, 0, 100);
1205 memset(pq_sw[i+1] + 200, 0, 200);
1206 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1207 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1210 zero_sum_result = 0;
1211 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1212 pq_src, IOP_ADMA_NUM_SRC_TEST,
1213 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1214 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1216 cookie = iop_adma_tx_submit(tx);
1217 iop_adma_issue_pending(dma_chan);
1220 if (iop_adma_status(dma_chan, cookie, NULL) !=
1222 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1224 goto free_resources;
1227 if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1228 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1231 goto free_resources;
1235 iop_adma_free_chan_resources(dma_chan);
1244 static int iop_adma_remove(struct platform_device *dev)
1246 struct iop_adma_device *device = platform_get_drvdata(dev);
1247 struct dma_chan *chan, *_chan;
1248 struct iop_adma_chan *iop_chan;
1249 struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1251 dma_async_device_unregister(&device->common);
1253 dma_free_coherent(&dev->dev, plat_data->pool_size,
1254 device->dma_desc_pool_virt, device->dma_desc_pool);
1256 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1258 iop_chan = to_iop_adma_chan(chan);
1259 list_del(&chan->device_node);
1267 static int iop_adma_probe(struct platform_device *pdev)
1269 struct resource *res;
1271 struct iop_adma_device *adev;
1272 struct iop_adma_chan *iop_chan;
1273 struct dma_device *dma_dev;
1274 struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1280 if (!devm_request_mem_region(&pdev->dev, res->start,
1281 resource_size(res), pdev->name))
1284 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1287 dma_dev = &adev->common;
1289 /* allocate coherent memory for hardware descriptors
1290 * note: writecombine gives slightly better performance, but
1291 * requires that we explicitly flush the writes
1293 adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
1294 plat_data->pool_size,
1295 &adev->dma_desc_pool,
1297 if (!adev->dma_desc_pool_virt) {
1302 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1303 __func__, adev->dma_desc_pool_virt,
1304 (void *) adev->dma_desc_pool);
1306 adev->id = plat_data->hw_id;
/* discover transaction capabilities from the platform data */
1309 dma_dev->cap_mask = plat_data->cap_mask;
1312 platform_set_drvdata(pdev, adev);
1314 INIT_LIST_HEAD(&dma_dev->channels);
1316 /* set base routines */
1317 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1318 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1319 dma_dev->device_tx_status = iop_adma_status;
1320 dma_dev->device_issue_pending = iop_adma_issue_pending;
1321 dma_dev->dev = &pdev->dev;
1323 /* set prep routines based on capability */
1324 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1325 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1326 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1327 dma_dev->max_xor = iop_adma_get_max_xor();
1328 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1330 if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1331 dma_dev->device_prep_dma_xor_val =
1332 iop_adma_prep_dma_xor_val;
1333 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1334 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1335 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1337 if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1338 dma_dev->device_prep_dma_pq_val =
1339 iop_adma_prep_dma_pq_val;
1340 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1341 dma_dev->device_prep_dma_interrupt =
1342 iop_adma_prep_dma_interrupt;
1344 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1349 iop_chan->device = adev;
1351 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1352 resource_size(res));
1353 if (!iop_chan->mmr_base) {
1355 goto err_free_iop_chan;
1357 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1360 /* clear errors before enabling interrupts */
1361 iop_adma_device_clear_err_status(iop_chan);
1363 for (i = 0; i < 3; i++) {
1364 irq_handler_t handler[] = { iop_adma_eot_handler,
1365 iop_adma_eoc_handler,
1366 iop_adma_err_handler };
1367 int irq = platform_get_irq(pdev, i);
1370 goto err_free_iop_chan;
1372 ret = devm_request_irq(&pdev->dev, irq,
1373 handler[i], 0, pdev->name, iop_chan);
1375 goto err_free_iop_chan;
1379 spin_lock_init(&iop_chan->lock);
1380 INIT_LIST_HEAD(&iop_chan->chain);
1381 INIT_LIST_HEAD(&iop_chan->all_slots);
1382 iop_chan->common.device = dma_dev;
1383 dma_cookie_init(&iop_chan->common);
1384 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1386 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1387 ret = iop_adma_memcpy_self_test(adev);
1388 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1390 goto err_free_iop_chan;
1393 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1394 ret = iop_adma_xor_val_self_test(adev);
1395 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1397 goto err_free_iop_chan;
1400 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1401 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1402 #ifdef CONFIG_RAID6_PQ
1403 ret = iop_adma_pq_zero_sum_self_test(adev);
1404 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
/* cannot test raid6, so do not publish capability */
1407 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1408 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1412 goto err_free_iop_chan;
1415 dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1416 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1417 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1418 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1419 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1420 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1421 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1423 dma_async_device_register(dma_dev);
1429 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1430 adev->dma_desc_pool_virt, adev->dma_desc_pool);
1437 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1439 struct iop_adma_desc_slot *sw_desc, *grp_start;
1440 dma_cookie_t cookie;
1441 int slot_cnt, slots_per_op;
1443 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1445 spin_lock_bh(&iop_chan->lock);
1446 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1447 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1449 grp_start = sw_desc->group_head;
1451 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1452 async_tx_ack(&sw_desc->async_tx);
1453 iop_desc_init_memcpy(grp_start, 0);
1454 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1455 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1456 iop_desc_set_memcpy_src_addr(grp_start, 0);
1458 cookie = dma_cookie_assign(&sw_desc->async_tx);
1460 /* initialize the completed cookie to be less than
1461 * the most recently used cookie
1463 iop_chan->common.completed_cookie = cookie - 1;
1465 /* channel should not be busy */
1466 BUG_ON(iop_chan_is_busy(iop_chan));
1468 /* clear any prior error-status bits */
1469 iop_adma_device_clear_err_status(iop_chan);
1471 /* disable operation */
1472 iop_chan_disable(iop_chan);
1474 /* set the descriptor address */
1475 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1477 /* 1/ don't add pre-chained descriptors
1478 * 2/ dummy read to flush next_desc write
1480 BUG_ON(iop_desc_get_next_desc(sw_desc));
1482 /* run the descriptor */
1483 iop_chan_enable(iop_chan);
1485 dev_err(iop_chan->device->common.dev,
1486 "failed to allocate null descriptor\n");
1487 spin_unlock_bh(&iop_chan->lock);
1490 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1492 struct iop_adma_desc_slot *sw_desc, *grp_start;
1493 dma_cookie_t cookie;
1494 int slot_cnt, slots_per_op;
1496 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1498 spin_lock_bh(&iop_chan->lock);
1499 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1500 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1502 grp_start = sw_desc->group_head;
1503 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1504 async_tx_ack(&sw_desc->async_tx);
1505 iop_desc_init_null_xor(grp_start, 2, 0);
1506 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1507 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1508 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1509 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1511 cookie = dma_cookie_assign(&sw_desc->async_tx);
1513 /* initialize the completed cookie to be less than
1514 * the most recently used cookie
1516 iop_chan->common.completed_cookie = cookie - 1;
1518 /* channel should not be busy */
1519 BUG_ON(iop_chan_is_busy(iop_chan));
1521 /* clear any prior error-status bits */
1522 iop_adma_device_clear_err_status(iop_chan);
1524 /* disable operation */
1525 iop_chan_disable(iop_chan);
1527 /* set the descriptor address */
1528 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1530 /* 1/ don't add pre-chained descriptors
1531 * 2/ dummy read to flush next_desc write
1533 BUG_ON(iop_desc_get_next_desc(sw_desc));
1535 /* run the descriptor */
1536 iop_chan_enable(iop_chan);
1538 dev_err(iop_chan->device->common.dev,
1539 "failed to allocate null descriptor\n");
1540 spin_unlock_bh(&iop_chan->lock);
1543 static struct platform_driver iop_adma_driver = {
1544 .probe = iop_adma_probe,
1545 .remove = iop_adma_remove,
1551 module_platform_driver(iop_adma_driver);
1553 MODULE_AUTHOR("Intel Corporation");
1554 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1555 MODULE_LICENSE("GPL");
1556 MODULE_ALIAS("platform:iop-adma");