1 // SPDX-License-Identifier: GPL-2.0-only
3 * offload engine driver for the Intel Xscale series of I/O processors
4 * Copyright © 2006, Intel Corporation.
8 * This driver supports the asynchronous DMA copy and RAID engines available
9 * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/platform_device.h>
19 #include <linux/prefetch.h>
20 #include <linux/memory.h>
21 #include <linux/ioport.h>
22 #include <linux/raid/pq.h>
23 #include <linux/slab.h>
25 #include <mach/adma.h>
27 #include "dmaengine.h"
29 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
30 #define to_iop_adma_device(dev) \
31 container_of(dev, struct iop_adma_device, common)
32 #define tx_to_iop_adma_slot(tx) \
33 container_of(tx, struct iop_adma_desc_slot, async_tx)
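/* the dmaengine core hands out struct dma_chan, struct dma_device and
 * struct dma_async_tx_descriptor pointers; these helpers recover the
 * iop-adma containers they are embedded in (via the 'common' and
 * 'async_tx' members respectively)
 */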
36 * iop_adma_free_slots - flags descriptor slots for reuse
38 * Caller must hold &iop_chan->lock while calling this function
40 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
42 int stride = slot->slots_per_op;
45 slot->slots_per_op = 0;
46 slot = list_entry(slot->slot_node.next,
47 struct iop_adma_desc_slot,
53 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
54 struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
56 struct dma_async_tx_descriptor *tx = &desc->async_tx;
58 BUG_ON(tx->cookie < 0);
63 /* call the callback (must not sleep or submit new
64 * operations to this channel)
66 dmaengine_desc_get_callback_invoke(tx, NULL);
68 dma_descriptor_unmap(tx);
70 desc->group_head = NULL;
73 /* run dependent operations */
74 dma_run_dependencies(tx);
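/* iop_adma_clean_slot - return a descriptor's slots to the free pool once
 * the client has acked it; the tail of the chain is always kept so new
 * descriptors can be appended to it
 */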
80 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
81 struct iop_adma_chan *iop_chan)
83 /* the client is allowed to attach dependent operations
86 if (!async_tx_test_ack(&desc->async_tx))
89 /* leave the last descriptor in the chain
90 * so we can append to it
92 if (desc->chain_node.next == &iop_chan->chain)
95 dev_dbg(iop_chan->device->common.dev,
96 "\tfree slot: %d slots_per_op: %d\n",
97 desc->idx, desc->slots_per_op);
99 list_del(&desc->chain_node);
100 iop_adma_free_slots(desc);
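/* walk the descriptor chain from the oldest entry, retire everything the
 * hardware has finished (stopping at the descriptor currently loaded into
 * the channel), write back zero-sum results for single descriptors and
 * multi-slot groups, and advance the channel's completed cookie
 */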
105 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
107 struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
108 dma_cookie_t cookie = 0;
109 u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
110 int busy = iop_chan_is_busy(iop_chan);
111 int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
113 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
114 /* free completed slots from the chain starting with
115 * the oldest descriptor
117 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
119 pr_debug("\tcookie: %d slot: %d busy: %d "
120 "this_desc: %pad next_desc: %#llx ack: %d\n",
121 iter->async_tx.cookie, iter->idx, busy,
122 &iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
123 async_tx_test_ack(&iter->async_tx));
125 prefetch(&_iter->async_tx);
127 /* do not advance past the current descriptor loaded into the
128 * hardware channel; subsequent descriptors are either in
129 * progress or have not been submitted
134 /* stop the search if we reach the current descriptor and the
135 * channel is busy, or if it appears that the current descriptor
136 * needs to be re-read (i.e. has been appended to)
138 if (iter->async_tx.phys == current_desc) {
139 BUG_ON(seen_current++);
140 if (busy || iop_desc_get_next_desc(iter))
144 /* detect the start of a group transaction */
145 if (!slot_cnt && !slots_per_op) {
146 slot_cnt = iter->slot_cnt;
147 slots_per_op = iter->slots_per_op;
148 if (slot_cnt <= slots_per_op) {
155 pr_debug("\tgroup++\n");
158 slot_cnt -= slots_per_op;
161 /* all the members of a group are complete */
162 if (slots_per_op != 0 && slot_cnt == 0) {
163 struct iop_adma_desc_slot *grp_iter, *_grp_iter;
164 int end_of_chain = 0;
165 pr_debug("\tgroup end\n");
167 /* collect the total results */
168 if (grp_start->xor_check_result) {
169 u32 zero_sum_result = 0;
170 slot_cnt = grp_start->slot_cnt;
171 grp_iter = grp_start;
173 list_for_each_entry_from(grp_iter,
174 &iop_chan->chain, chain_node) {
176 iop_desc_get_zero_result(grp_iter);
177 pr_debug("\titer%d result: %d\n",
178 grp_iter->idx, zero_sum_result);
179 slot_cnt -= slots_per_op;
183 pr_debug("\tgrp_start->xor_check_result: %p\n",
184 grp_start->xor_check_result);
185 *grp_start->xor_check_result = zero_sum_result;
188 /* clean up the group */
189 slot_cnt = grp_start->slot_cnt;
190 grp_iter = grp_start;
191 list_for_each_entry_safe_from(grp_iter, _grp_iter,
192 &iop_chan->chain, chain_node) {
193 cookie = iop_adma_run_tx_complete_actions(
194 grp_iter, iop_chan, cookie);
196 slot_cnt -= slots_per_op;
197 end_of_chain = iop_adma_clean_slot(grp_iter,
200 if (slot_cnt == 0 || end_of_chain)
204 /* the group should be complete at this point */
213 } else if (slots_per_op) /* wait for group completion */
216 /* write back zero sum results (single descriptor case) */
217 if (iter->xor_check_result && iter->async_tx.cookie)
218 *iter->xor_check_result =
219 iop_desc_get_zero_result(iter);
221 cookie = iop_adma_run_tx_complete_actions(
222 iter, iop_chan, cookie);
224 if (iop_adma_clean_slot(iter, iop_chan))
229 iop_chan->common.completed_cookie = cookie;
230 pr_debug("\tcompleted cookie %d\n", cookie);
235 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
237 spin_lock_bh(&iop_chan->lock);
238 __iop_adma_slot_cleanup(iop_chan);
239 spin_unlock_bh(&iop_chan->lock);
242 static void iop_adma_tasklet(unsigned long data)
244 struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
246 /* lockdep will flag dependency submissions as potentially
247 * recursive locking; this is not the case, as a dependency
248 * submission will never recurse into a channel's submit routine.
249 * There are checks in async_tx.c to prevent this.
251 spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
252 __iop_adma_slot_cleanup(iop_chan);
253 spin_unlock(&iop_chan->lock);
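/* carve 'num_slots' contiguous, correctly aligned descriptor slots out of
 * all_slots: all but the last descriptor in the group are pre-acked, the
 * run is collected on the returned descriptor's tx_list, and its
 * group_head points at the first slot of the allocation
 */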
256 static struct iop_adma_desc_slot *
257 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
260 struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
262 int slots_found, retry = 0;
264 /* start the search from the last allocated descriptor;
265 * if a contiguous allocation cannot be found, start searching
266 * from the beginning of the list
271 iter = iop_chan->last_used;
273 iter = list_entry(&iop_chan->all_slots,
274 struct iop_adma_desc_slot,
277 list_for_each_entry_safe_continue(
278 iter, _iter, &iop_chan->all_slots, slot_node) {
280 prefetch(&_iter->async_tx);
281 if (iter->slots_per_op) {
282 /* give up after finding the first busy slot
283 * on the second pass through the list
292 /* start the allocation if the slot is correctly aligned */
293 if (!slots_found++) {
294 if (iop_desc_is_aligned(iter, slots_per_op))
302 if (slots_found == num_slots) {
303 struct iop_adma_desc_slot *alloc_tail = NULL;
304 struct iop_adma_desc_slot *last_used = NULL;
308 dev_dbg(iop_chan->device->common.dev,
309 "allocated slot: %d "
310 "(desc %p phys: %#llx) slots_per_op %d\n",
311 iter->idx, iter->hw_desc,
312 (u64)iter->async_tx.phys, slots_per_op);
314 /* pre-ack all but the last descriptor */
315 if (num_slots != slots_per_op)
316 async_tx_ack(&iter->async_tx);
318 list_add_tail(&iter->chain_node, &chain);
320 iter->async_tx.cookie = 0;
321 iter->slot_cnt = num_slots;
322 iter->xor_check_result = NULL;
323 for (i = 0; i < slots_per_op; i++) {
324 iter->slots_per_op = slots_per_op - i;
326 iter = list_entry(iter->slot_node.next,
327 struct iop_adma_desc_slot,
330 num_slots -= slots_per_op;
332 alloc_tail->group_head = alloc_start;
333 alloc_tail->async_tx.cookie = -EBUSY;
334 list_splice(&chain, &alloc_tail->tx_list);
335 iop_chan->last_used = last_used;
336 iop_desc_clear_next_desc(alloc_start);
337 iop_desc_clear_next_desc(alloc_tail);
344 /* perform direct reclaim if the allocation fails */
345 __iop_adma_slot_cleanup(iop_chan);
350 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
352 dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
355 if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
356 iop_chan->pending = 0;
357 iop_chan_append(iop_chan);
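/* tx_submit: assign a cookie, splice the descriptor group onto the
 * software chain, link it into the hardware chain via the old tail's
 * next_desc field, and kick the engine once enough work is pending
 * (see iop_adma_check_threshold)
 */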
362 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
364 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
365 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
366 struct iop_adma_desc_slot *grp_start, *old_chain_tail;
372 grp_start = sw_desc->group_head;
373 slot_cnt = grp_start->slot_cnt;
374 slots_per_op = grp_start->slots_per_op;
376 spin_lock_bh(&iop_chan->lock);
377 cookie = dma_cookie_assign(tx);
379 old_chain_tail = list_entry(iop_chan->chain.prev,
380 struct iop_adma_desc_slot, chain_node);
381 list_splice_init(&sw_desc->tx_list,
382 &old_chain_tail->chain_node);
384 /* fix up the hardware chain */
385 next_dma = grp_start->async_tx.phys;
386 iop_desc_set_next_desc(old_chain_tail, next_dma);
387 BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
389 /* check for pre-chained descriptors */
390 iop_paranoia(iop_desc_get_next_desc(sw_desc));
392 /* increment the pending count by the number of slots;
393 * memcpy operations have a 1:1 (slot:operation) relation,
394 * other operations are heavier and will pop the threshold
397 iop_chan->pending += slot_cnt;
398 iop_adma_check_threshold(iop_chan);
399 spin_unlock_bh(&iop_chan->lock);
401 dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
402 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
407 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
408 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
411 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
412 * @chan: allocate descriptor resources for this channel
415 * Note: We keep the slots for 1 operation on iop_chan->chain at all times. To
416 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
417 * greater than 2x the number of slots needed to satisfy a device->max_xor
420 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
424 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
425 struct iop_adma_desc_slot *slot = NULL;
426 int init = iop_chan->slots_allocated ? 0 : 1;
427 struct iop_adma_platform_data *plat_data =
428 dev_get_platdata(&iop_chan->device->pdev->dev);
429 int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
431 /* Allocate descriptor slots */
433 idx = iop_chan->slots_allocated;
434 if (idx == num_descs_in_pool)
437 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
439 printk(KERN_INFO "IOP ADMA Channel only initialized"
440 " %d descriptor slots\n", idx);
443 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
444 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
446 dma_async_tx_descriptor_init(&slot->async_tx, chan);
447 slot->async_tx.tx_submit = iop_adma_tx_submit;
448 INIT_LIST_HEAD(&slot->tx_list);
449 INIT_LIST_HEAD(&slot->chain_node);
450 INIT_LIST_HEAD(&slot->slot_node);
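/* the descriptor's bus address is the same offset into dma_desc_pool
 * as hw_desc is into dma_desc_pool_virt
 */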
451 hw_desc = (char *) iop_chan->device->dma_desc_pool;
452 slot->async_tx.phys =
453 (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
456 spin_lock_bh(&iop_chan->lock);
457 iop_chan->slots_allocated++;
458 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
459 spin_unlock_bh(&iop_chan->lock);
460 } while (iop_chan->slots_allocated < num_descs_in_pool);
462 if (idx && !iop_chan->last_used)
463 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
464 struct iop_adma_desc_slot,
467 dev_dbg(iop_chan->device->common.dev,
468 "allocated %d descriptor slots last_used: %p\n",
469 iop_chan->slots_allocated, iop_chan->last_used);
471 /* initialize the channel and the chain with a null operation */
473 if (dma_has_cap(DMA_MEMCPY,
474 iop_chan->device->common.cap_mask))
475 iop_chan_start_null_memcpy(iop_chan);
476 else if (dma_has_cap(DMA_XOR,
477 iop_chan->device->common.cap_mask))
478 iop_chan_start_null_xor(iop_chan);
483 return (idx > 0) ? idx : -ENOMEM;
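/* DMA_INTERRUPT descriptor: moves no data, the slot only exists to raise
 * a completion interrupt (used by async_tx to signal dependency chains)
 */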
486 static struct dma_async_tx_descriptor *
487 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
489 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
490 struct iop_adma_desc_slot *sw_desc, *grp_start;
491 int slot_cnt, slots_per_op;
493 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
495 spin_lock_bh(&iop_chan->lock);
496 slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
497 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
499 grp_start = sw_desc->group_head;
500 iop_desc_init_interrupt(grp_start, iop_chan);
501 sw_desc->async_tx.flags = flags;
503 spin_unlock_bh(&iop_chan->lock);
505 return sw_desc ? &sw_desc->async_tx : NULL;
508 static struct dma_async_tx_descriptor *
509 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
510 dma_addr_t dma_src, size_t len, unsigned long flags)
512 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
513 struct iop_adma_desc_slot *sw_desc, *grp_start;
514 int slot_cnt, slots_per_op;
518 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
520 dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
523 spin_lock_bh(&iop_chan->lock);
524 slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
525 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
527 grp_start = sw_desc->group_head;
528 iop_desc_init_memcpy(grp_start, flags);
529 iop_desc_set_byte_count(grp_start, iop_chan, len);
530 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
531 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
532 sw_desc->async_tx.flags = flags;
534 spin_unlock_bh(&iop_chan->lock);
536 return sw_desc ? &sw_desc->async_tx : NULL;
539 static struct dma_async_tx_descriptor *
540 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
541 dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
544 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
545 struct iop_adma_desc_slot *sw_desc, *grp_start;
546 int slot_cnt, slots_per_op;
550 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
552 dev_dbg(iop_chan->device->common.dev,
553 "%s src_cnt: %d len: %zu flags: %lx\n",
554 __func__, src_cnt, len, flags);
556 spin_lock_bh(&iop_chan->lock);
557 slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
558 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
560 grp_start = sw_desc->group_head;
561 iop_desc_init_xor(grp_start, src_cnt, flags);
562 iop_desc_set_byte_count(grp_start, iop_chan, len);
563 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
564 sw_desc->async_tx.flags = flags;
566 iop_desc_set_xor_src_addr(grp_start, src_cnt,
569 spin_unlock_bh(&iop_chan->lock);
571 return sw_desc ? &sw_desc->async_tx : NULL;
574 static struct dma_async_tx_descriptor *
575 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
576 unsigned int src_cnt, size_t len, u32 *result,
579 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
580 struct iop_adma_desc_slot *sw_desc, *grp_start;
581 int slot_cnt, slots_per_op;
586 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
587 __func__, src_cnt, len);
589 spin_lock_bh(&iop_chan->lock);
590 slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
591 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
593 grp_start = sw_desc->group_head;
594 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
595 iop_desc_set_zero_sum_byte_count(grp_start, len);
596 grp_start->xor_check_result = result;
597 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
598 __func__, grp_start->xor_check_result);
599 sw_desc->async_tx.flags = flags;
601 iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
604 spin_unlock_bh(&iop_chan->lock);
606 return sw_desc ? &sw_desc->async_tx : NULL;
609 static struct dma_async_tx_descriptor *
610 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
611 unsigned int src_cnt, const unsigned char *scf, size_t len,
614 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
615 struct iop_adma_desc_slot *sw_desc, *g;
616 int slot_cnt, slots_per_op;
621 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
623 dev_dbg(iop_chan->device->common.dev,
624 "%s src_cnt: %d len: %zu flags: %lx\n",
625 __func__, src_cnt, len, flags);
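/* a RAID6 continuation must feed the previous P and/or Q results back in
 * as implied sources, so account for them when sizing the descriptor
 * (see the dma_maxpq() comment in include/linux/dmaengine.h)
 */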
627 if (dmaf_p_disabled_continue(flags))
628 continue_srcs = 1+src_cnt;
629 else if (dmaf_continue(flags))
630 continue_srcs = 3+src_cnt;
632 continue_srcs = 0+src_cnt;
634 spin_lock_bh(&iop_chan->lock);
635 slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
636 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
640 g = sw_desc->group_head;
641 iop_desc_set_byte_count(g, iop_chan, len);
643 /* even if P is disabled its destination address (bits
644 * [3:0]) must match Q. It is ok if P points to an
645 * invalid address; it won't be written.
647 if (flags & DMA_PREP_PQ_DISABLE_P)
648 dst[0] = dst[1] & 0x7;
650 iop_desc_set_pq_addr(g, dst);
651 sw_desc->async_tx.flags = flags;
652 for (i = 0; i < src_cnt; i++)
653 iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
655 /* if we are continuing a previous operation, factor in
656 * the old p and q values; see the comment for dma_maxpq
657 * in include/linux/dmaengine.h
659 if (dmaf_p_disabled_continue(flags))
660 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
661 else if (dmaf_continue(flags)) {
662 iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
663 iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
664 iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
666 iop_desc_init_pq(g, i, flags);
668 spin_unlock_bh(&iop_chan->lock);
670 return sw_desc ? &sw_desc->async_tx : NULL;
673 static struct dma_async_tx_descriptor *
674 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
675 unsigned int src_cnt, const unsigned char *scf,
676 size_t len, enum sum_check_flags *pqres,
679 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
680 struct iop_adma_desc_slot *sw_desc, *g;
681 int slot_cnt, slots_per_op;
685 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
687 dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
688 __func__, src_cnt, len);
690 spin_lock_bh(&iop_chan->lock);
691 slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
692 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
694 /* for validate operations, p and q are tagged onto the
695 * end of the source list
697 int pq_idx = src_cnt;
699 g = sw_desc->group_head;
700 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
701 iop_desc_set_pq_zero_sum_byte_count(g, len);
702 g->pq_check_result = pqres;
703 pr_debug("\t%s: g->pq_check_result: %p\n",
704 __func__, g->pq_check_result);
705 sw_desc->async_tx.flags = flags;
707 iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
710 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
712 spin_unlock_bh(&iop_chan->lock);
714 return sw_desc ? &sw_desc->async_tx : NULL;
717 static void iop_adma_free_chan_resources(struct dma_chan *chan)
719 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
720 struct iop_adma_desc_slot *iter, *_iter;
721 int in_use_descs = 0;
723 iop_adma_slot_cleanup(iop_chan);
725 spin_lock_bh(&iop_chan->lock);
726 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
729 list_del(&iter->chain_node);
731 list_for_each_entry_safe_reverse(
732 iter, _iter, &iop_chan->all_slots, slot_node) {
733 list_del(&iter->slot_node);
735 iop_chan->slots_allocated--;
737 iop_chan->last_used = NULL;
739 dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
740 __func__, iop_chan->slots_allocated);
741 spin_unlock_bh(&iop_chan->lock);
743 /* one is ok since we left it there on purpose */
744 if (in_use_descs > 1)
745 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
750 * iop_adma_status - poll the status of an ADMA transaction
751 * @chan: ADMA channel handle
752 * @cookie: ADMA transaction identifier
753 * @txstate: a holder for the current state of the channel or NULL
755 static enum dma_status iop_adma_status(struct dma_chan *chan,
757 struct dma_tx_state *txstate)
759 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
762 ret = dma_cookie_status(chan, cookie, txstate);
763 if (ret == DMA_COMPLETE)
766 iop_adma_slot_cleanup(iop_chan);
768 return dma_cookie_status(chan, cookie, txstate);
771 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
773 struct iop_adma_chan *chan = data;
775 dev_dbg(chan->device->common.dev, "%s\n", __func__);
777 tasklet_schedule(&chan->irq_tasklet);
779 iop_adma_device_clear_eot_status(chan);
784 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
786 struct iop_adma_chan *chan = data;
788 dev_dbg(chan->device->common.dev, "%s\n", __func__);
790 tasklet_schedule(&chan->irq_tasklet);
792 iop_adma_device_clear_eoc_status(chan);
797 static irqreturn_t iop_adma_err_handler(int irq, void *data)
799 struct iop_adma_chan *chan = data;
800 unsigned long status = iop_chan_get_status(chan);
802 dev_err(chan->device->common.dev,
803 "error ( %s%s%s%s%s%s%s)\n",
804 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
805 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
806 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
807 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
808 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
809 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
810 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
812 iop_adma_device_clear_err_status(chan);
819 static void iop_adma_issue_pending(struct dma_chan *chan)
821 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
823 if (iop_chan->pending) {
824 iop_chan->pending = 0;
825 iop_chan_append(iop_chan);
830 * Perform a transaction to verify the HW works.
832 #define IOP_ADMA_TEST_SIZE 2000
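/* memcpy self-test: fill a source buffer with a counting pattern, DMA-copy
 * it to a zeroed destination, and memcmp() the two buffers
 */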
834 static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
838 dma_addr_t src_dma, dest_dma;
839 struct dma_chan *dma_chan;
841 struct dma_async_tx_descriptor *tx;
843 struct iop_adma_chan *iop_chan;
845 dev_dbg(device->common.dev, "%s\n", __func__);
847 src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
850 dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
856 /* Fill in src buffer */
857 for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
858 ((u8 *) src)[i] = (u8)i;
860 /* Start copy, using first DMA channel */
861 dma_chan = container_of(device->common.channels.next,
864 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
869 dest_dma = dma_map_single(dma_chan->device->dev, dest,
870 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
871 src_dma = dma_map_single(dma_chan->device->dev, src,
872 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
873 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
875 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
877 cookie = iop_adma_tx_submit(tx);
878 iop_adma_issue_pending(dma_chan);
881 if (iop_adma_status(dma_chan, cookie, NULL) !=
883 dev_err(dma_chan->device->dev,
884 "Self-test copy timed out, disabling\n");
889 iop_chan = to_iop_adma_chan(dma_chan);
890 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
891 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
892 if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
893 dev_err(dma_chan->device->dev,
894 "Self-test copy failed compare, disabling\n");
900 iop_adma_free_chan_resources(dma_chan);
907 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
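/* xor self-test: XOR IOP_ADMA_NUM_SRC_TEST single-bit patterned pages into
 * one destination and compare against the expected word pattern, then, if
 * DMA_XOR_VAL is supported, exercise the zero-sum checks below
 */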
909 iop_adma_xor_val_self_test(struct iop_adma_device *device)
913 struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
914 struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
915 dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
917 struct dma_async_tx_descriptor *tx;
918 struct dma_chan *dma_chan;
924 struct iop_adma_chan *iop_chan;
926 dev_dbg(device->common.dev, "%s\n", __func__);
928 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
929 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
930 if (!xor_srcs[src_idx]) {
932 __free_page(xor_srcs[src_idx]);
937 dest = alloc_page(GFP_KERNEL);
940 __free_page(xor_srcs[src_idx]);
944 /* Fill in src buffers */
945 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
946 u8 *ptr = page_address(xor_srcs[src_idx]);
947 for (i = 0; i < PAGE_SIZE; i++)
948 ptr[i] = (1 << src_idx);
951 for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
952 cmp_byte ^= (u8) (1 << src_idx);
954 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
955 (cmp_byte << 8) | cmp_byte;
957 memset(page_address(dest), 0, PAGE_SIZE);
959 dma_chan = container_of(device->common.channels.next,
962 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
968 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
969 PAGE_SIZE, DMA_FROM_DEVICE);
970 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
971 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
972 0, PAGE_SIZE, DMA_TO_DEVICE);
973 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
974 IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
975 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
977 cookie = iop_adma_tx_submit(tx);
978 iop_adma_issue_pending(dma_chan);
981 if (iop_adma_status(dma_chan, cookie, NULL) !=
983 dev_err(dma_chan->device->dev,
984 "Self-test xor timed out, disabling\n");
989 iop_chan = to_iop_adma_chan(dma_chan);
990 dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
991 PAGE_SIZE, DMA_FROM_DEVICE);
992 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
993 u32 *ptr = page_address(dest);
994 if (ptr[i] != cmp_word) {
995 dev_err(dma_chan->device->dev,
996 "Self-test xor failed compare, disabling\n");
1001 dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1002 PAGE_SIZE, DMA_TO_DEVICE);
1004 /* skip zero sum if the capability is not present */
1005 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1006 goto free_resources;
1008 /* zero sum the sources with the destination page */
1009 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1010 zero_sum_srcs[i] = xor_srcs[i];
1011 zero_sum_srcs[i] = dest;
1013 zero_sum_result = 1;
1015 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1016 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1017 zero_sum_srcs[i], 0, PAGE_SIZE,
1019 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1020 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1022 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1024 cookie = iop_adma_tx_submit(tx);
1025 iop_adma_issue_pending(dma_chan);
1028 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1029 dev_err(dma_chan->device->dev,
1030 "Self-test zero sum timed out, disabling\n");
1032 goto free_resources;
1035 if (zero_sum_result != 0) {
1036 dev_err(dma_chan->device->dev,
1037 "Self-test zero sum failed compare, disabling\n");
1039 goto free_resources;
1042 /* test for non-zero parity sum */
1043 zero_sum_result = 0;
1044 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1045 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1046 zero_sum_srcs[i], 0, PAGE_SIZE,
1048 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1049 IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1051 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1053 cookie = iop_adma_tx_submit(tx);
1054 iop_adma_issue_pending(dma_chan);
1057 if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1058 dev_err(dma_chan->device->dev,
1059 "Self-test non-zero sum timed out, disabling\n");
1061 goto free_resources;
1064 if (zero_sum_result != 1) {
1065 dev_err(dma_chan->device->dev,
1066 "Self-test non-zero sum failed compare, disabling\n");
1068 goto free_resources;
1072 iop_adma_free_chan_resources(dma_chan);
1074 src_idx = IOP_ADMA_NUM_SRC_TEST;
1076 __free_page(xor_srcs[src_idx]);
1081 #ifdef CONFIG_RAID6_PQ
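/* pq self-test: generate P/Q with the engine and compare against the
 * software raid6_call.gen_syndrome() results, then run pq_val over the
 * software-generated syndrome (expecting a clean result) and again after
 * corrupting P and Q (expecting SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)
 */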
1083 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1085 /* combined sources, software pq results, and extra hw pq results */
1086 struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1087 /* ptr to the extra hw pq buffers defined above */
1088 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1089 /* address conversion buffers (dma_map / page_address) */
1090 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1091 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1092 dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1095 struct dma_async_tx_descriptor *tx;
1096 struct dma_chan *dma_chan;
1097 dma_cookie_t cookie;
1098 u32 zero_sum_result;
1102 dev_dbg(device->common.dev, "%s\n", __func__);
1104 for (i = 0; i < ARRAY_SIZE(pq); i++) {
1105 pq[i] = alloc_page(GFP_KERNEL);
1113 /* Fill in src buffers */
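/* note: memset() only uses the low byte of its value argument, so each
 * source page is filled with the single byte (1 << i)
 */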
1114 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1115 pq_sw[i] = page_address(pq[i]);
1116 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1118 pq_sw[i] = page_address(pq[i]);
1119 pq_sw[i+1] = page_address(pq[i+1]);
1121 dma_chan = container_of(device->common.channels.next,
1124 if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1129 dev = dma_chan->device->dev;
1131 /* initialize the dests */
1132 memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
1133 memset(page_address(pq_hw[1]), 0, PAGE_SIZE);
1136 pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1137 pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1138 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1139 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1142 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1143 IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1145 DMA_PREP_INTERRUPT |
1148 cookie = iop_adma_tx_submit(tx);
1149 iop_adma_issue_pending(dma_chan);
1152 if (iop_adma_status(dma_chan, cookie, NULL) !=
1154 dev_err(dev, "Self-test pq timed out, disabling\n");
1156 goto free_resources;
1159 raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1161 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1162 page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1163 dev_err(dev, "Self-test p failed compare, disabling\n");
1165 goto free_resources;
1167 if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1168 page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1169 dev_err(dev, "Self-test q failed compare, disabling\n");
1171 goto free_resources;
1174 /* test correct zero sum using the software-generated pq values */
1175 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1176 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1179 zero_sum_result = ~0;
1180 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1181 pq_src, IOP_ADMA_NUM_SRC_TEST,
1182 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1183 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1185 cookie = iop_adma_tx_submit(tx);
1186 iop_adma_issue_pending(dma_chan);
1189 if (iop_adma_status(dma_chan, cookie, NULL) !=
1191 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1193 goto free_resources;
1196 if (zero_sum_result != 0) {
1197 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1200 goto free_resources;
1203 /* test incorrect zero sum */
1204 i = IOP_ADMA_NUM_SRC_TEST;
1205 memset(pq_sw[i] + 100, 0, 100);
1206 memset(pq_sw[i+1] + 200, 0, 200);
1207 for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1208 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1211 zero_sum_result = 0;
1212 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1213 pq_src, IOP_ADMA_NUM_SRC_TEST,
1214 raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1215 DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1217 cookie = iop_adma_tx_submit(tx);
1218 iop_adma_issue_pending(dma_chan);
1221 if (iop_adma_status(dma_chan, cookie, NULL) !=
1223 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1225 goto free_resources;
1228 if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1229 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1232 goto free_resources;
1236 iop_adma_free_chan_resources(dma_chan);
1245 static int iop_adma_remove(struct platform_device *dev)
1247 struct iop_adma_device *device = platform_get_drvdata(dev);
1248 struct dma_chan *chan, *_chan;
1249 struct iop_adma_chan *iop_chan;
1250 struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1252 dma_async_device_unregister(&device->common);
1254 dma_free_coherent(&dev->dev, plat_data->pool_size,
1255 device->dma_desc_pool_virt, device->dma_desc_pool);
1257 list_for_each_entry_safe(chan, _chan, &device->common.channels,
1259 iop_chan = to_iop_adma_chan(chan);
1260 list_del(&chan->device_node);
1268 static int iop_adma_probe(struct platform_device *pdev)
1270 struct resource *res;
1272 struct iop_adma_device *adev;
1273 struct iop_adma_chan *iop_chan;
1274 struct dma_device *dma_dev;
1275 struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1277 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1281 if (!devm_request_mem_region(&pdev->dev, res->start,
1282 resource_size(res), pdev->name))
1285 adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1288 dma_dev = &adev->common;
1290 /* allocate coherent memory for hardware descriptors
1291 * note: writecombine gives slightly better performance, but
1292 * requires that we explicitly flush the writes
1294 adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
1295 plat_data->pool_size,
1296 &adev->dma_desc_pool,
1298 if (!adev->dma_desc_pool_virt) {
1303 dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1304 __func__, adev->dma_desc_pool_virt,
1305 (void *) adev->dma_desc_pool);
1307 adev->id = plat_data->hw_id;
1309 /* discover transaction capabilities from the platform data */
1310 dma_dev->cap_mask = plat_data->cap_mask;
1313 platform_set_drvdata(pdev, adev);
1315 INIT_LIST_HEAD(&dma_dev->channels);
1317 /* set base routines */
1318 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1319 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1320 dma_dev->device_tx_status = iop_adma_status;
1321 dma_dev->device_issue_pending = iop_adma_issue_pending;
1322 dma_dev->dev = &pdev->dev;
1324 /* set prep routines based on capability */
1325 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1326 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1327 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1328 dma_dev->max_xor = iop_adma_get_max_xor();
1329 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1331 if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1332 dma_dev->device_prep_dma_xor_val =
1333 iop_adma_prep_dma_xor_val;
1334 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1335 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1336 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1338 if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1339 dma_dev->device_prep_dma_pq_val =
1340 iop_adma_prep_dma_pq_val;
1341 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1342 dma_dev->device_prep_dma_interrupt =
1343 iop_adma_prep_dma_interrupt;
1345 iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1350 iop_chan->device = adev;
1352 iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1353 resource_size(res));
1354 if (!iop_chan->mmr_base) {
1356 goto err_free_iop_chan;
1358 tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1361 /* clear errors before enabling interrupts */
1362 iop_adma_device_clear_err_status(iop_chan);
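/* request the three platform IRQs; handler[] maps them, in order, to
 * end-of-transfer, end-of-chain, and error handling
 */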
1364 for (i = 0; i < 3; i++) {
1365 irq_handler_t handler[] = { iop_adma_eot_handler,
1366 iop_adma_eoc_handler,
1367 iop_adma_err_handler };
1368 int irq = platform_get_irq(pdev, i);
1371 goto err_free_iop_chan;
1373 ret = devm_request_irq(&pdev->dev, irq,
1374 handler[i], 0, pdev->name, iop_chan);
1376 goto err_free_iop_chan;
1380 spin_lock_init(&iop_chan->lock);
1381 INIT_LIST_HEAD(&iop_chan->chain);
1382 INIT_LIST_HEAD(&iop_chan->all_slots);
1383 iop_chan->common.device = dma_dev;
1384 dma_cookie_init(&iop_chan->common);
1385 list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1387 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1388 ret = iop_adma_memcpy_self_test(adev);
1389 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1391 goto err_free_iop_chan;
1394 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1395 ret = iop_adma_xor_val_self_test(adev);
1396 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1398 goto err_free_iop_chan;
1401 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1402 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1403 #ifdef CONFIG_RAID6_PQ
1404 ret = iop_adma_pq_zero_sum_self_test(adev);
1405 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1407 /* cannot test raid6, so do not publish the capability */
1408 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1409 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1413 goto err_free_iop_chan;
1416 dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1417 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1418 dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1419 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1420 dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1421 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1422 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1424 dma_async_device_register(dma_dev);
1430 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1431 adev->dma_desc_pool_virt, adev->dma_desc_pool);
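/* seed an idle channel with a null (zero length) operation so the chain
 * always has a tail descriptor that new submissions can append to
 */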
1438 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1440 struct iop_adma_desc_slot *sw_desc, *grp_start;
1441 dma_cookie_t cookie;
1442 int slot_cnt, slots_per_op;
1444 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1446 spin_lock_bh(&iop_chan->lock);
1447 slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1448 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1450 grp_start = sw_desc->group_head;
1452 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1453 async_tx_ack(&sw_desc->async_tx);
1454 iop_desc_init_memcpy(grp_start, 0);
1455 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1456 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1457 iop_desc_set_memcpy_src_addr(grp_start, 0);
1459 cookie = dma_cookie_assign(&sw_desc->async_tx);
1461 /* initialize the completed cookie to be less than
1462 * the most recently used cookie
1464 iop_chan->common.completed_cookie = cookie - 1;
1466 /* channel should not be busy */
1467 BUG_ON(iop_chan_is_busy(iop_chan));
1469 /* clear any prior error-status bits */
1470 iop_adma_device_clear_err_status(iop_chan);
1472 /* disable operation */
1473 iop_chan_disable(iop_chan);
1475 /* set the descriptor address */
1476 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1478 /* 1/ don't add pre-chained descriptors
1479 * 2/ dummy read to flush next_desc write
1481 BUG_ON(iop_desc_get_next_desc(sw_desc));
1483 /* run the descriptor */
1484 iop_chan_enable(iop_chan);
1486 dev_err(iop_chan->device->common.dev,
1487 "failed to allocate null descriptor\n");
1488 spin_unlock_bh(&iop_chan->lock);
1491 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1493 struct iop_adma_desc_slot *sw_desc, *grp_start;
1494 dma_cookie_t cookie;
1495 int slot_cnt, slots_per_op;
1497 dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1499 spin_lock_bh(&iop_chan->lock);
1500 slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1501 sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1503 grp_start = sw_desc->group_head;
1504 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1505 async_tx_ack(&sw_desc->async_tx);
1506 iop_desc_init_null_xor(grp_start, 2, 0);
1507 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1508 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1509 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1510 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1512 cookie = dma_cookie_assign(&sw_desc->async_tx);
1514 /* initialize the completed cookie to be less than
1515 * the most recently used cookie
1517 iop_chan->common.completed_cookie = cookie - 1;
1519 /* channel should not be busy */
1520 BUG_ON(iop_chan_is_busy(iop_chan));
1522 /* clear any prior error-status bits */
1523 iop_adma_device_clear_err_status(iop_chan);
1525 /* disable operation */
1526 iop_chan_disable(iop_chan);
1528 /* set the descriptor address */
1529 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1531 /* 1/ don't add pre-chained descriptors
1532 * 2/ dummy read to flush next_desc write
1534 BUG_ON(iop_desc_get_next_desc(sw_desc));
1536 /* run the descriptor */
1537 iop_chan_enable(iop_chan);
1539 dev_err(iop_chan->device->common.dev,
1540 "failed to allocate null descriptor\n");
1541 spin_unlock_bh(&iop_chan->lock);
1544 static struct platform_driver iop_adma_driver = {
1545 .probe = iop_adma_probe,
1546 .remove = iop_adma_remove,
1552 module_platform_driver(iop_adma_driver);
1554 MODULE_AUTHOR("Intel Corporation");
1555 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1556 MODULE_LICENSE("GPL");
1557 MODULE_ALIAS("platform:iop-adma");