1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * offload engine driver for the Intel Xscale series of i/o processors
4  * Copyright © 2006, Intel Corporation.
5  */
6
7 /*
8  * This driver supports the asynchronous DMA copy and RAID engines available
9  * on the Intel Xscale(R) family of I/O Processors (IOP 32x, 33x, 134x)
10  */
11
12 #include <linux/init.h>
13 #include <linux/module.h>
14 #include <linux/delay.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/spinlock.h>
17 #include <linux/interrupt.h>
18 #include <linux/platform_device.h>
19 #include <linux/prefetch.h>
20 #include <linux/memory.h>
21 #include <linux/ioport.h>
22 #include <linux/raid/pq.h>
23 #include <linux/slab.h>
24
25 #include <mach/adma.h>
26
27 #include "dmaengine.h"
28
29 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
30 #define to_iop_adma_device(dev) \
31         container_of(dev, struct iop_adma_device, common)
32 #define tx_to_iop_adma_slot(tx) \
33         container_of(tx, struct iop_adma_desc_slot, async_tx)
34
35 /**
36  * iop_adma_free_slots - flags descriptor slots for reuse
37  * @slot: Slot to free
38  * Caller must hold &iop_chan->lock while calling this function
39  */
40 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
41 {
42         int stride = slot->slots_per_op;
43
44         while (stride--) {
45                 slot->slots_per_op = 0;
46                 slot = list_entry(slot->slot_node.next,
47                                 struct iop_adma_desc_slot,
48                                 slot_node);
49         }
50 }
51
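/**
 * iop_adma_run_tx_complete_actions - complete a finished descriptor
 * @desc: descriptor whose transaction has finished
 * @iop_chan: channel the descriptor was submitted on
 * @cookie: newest completed cookie seen so far
 *
 * Invokes the client callback, unmaps the transaction and kicks off any
 * dependent operations.  Returns the updated completed cookie.
 */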
52 static dma_cookie_t
53 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
54         struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
55 {
56         struct dma_async_tx_descriptor *tx = &desc->async_tx;
57
58         BUG_ON(tx->cookie < 0);
59         if (tx->cookie > 0) {
60                 cookie = tx->cookie;
61                 tx->cookie = 0;
62
63                 /* call the callback (must not sleep or submit new
64                  * operations to this channel)
65                  */
66                 dmaengine_desc_get_callback_invoke(tx, NULL);
67
68                 dma_descriptor_unmap(tx);
69                 if (desc->group_head)
70                         desc->group_head = NULL;
71         }
72
73         /* run dependent operations */
74         dma_run_dependencies(tx);
75
76         return cookie;
77 }
78
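/**
 * iop_adma_clean_slot - try to recycle a completed descriptor
 * @desc: descriptor to recycle
 * @iop_chan: channel that owns @desc
 *
 * Descriptors the client has not yet acked are left alone.  Returns 1 if
 * @desc is the tail of the chain (it is kept so new operations can be
 * appended to it), 0 otherwise.
 */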
79 static int
80 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
81         struct iop_adma_chan *iop_chan)
82 {
83         /* the client is allowed to attach dependent operations
84          * until 'ack' is set
85          */
86         if (!async_tx_test_ack(&desc->async_tx))
87                 return 0;
88
89         /* leave the last descriptor in the chain
90          * so we can append to it
91          */
92         if (desc->chain_node.next == &iop_chan->chain)
93                 return 1;
94
95         dev_dbg(iop_chan->device->common.dev,
96                 "\tfree slot: %d slots_per_op: %d\n",
97                 desc->idx, desc->slots_per_op);
98
99         list_del(&desc->chain_node);
100         iop_adma_free_slots(desc);
101
102         return 0;
103 }
104
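/*
 * Walk the descriptor chain from the oldest entry and reap everything the
 * hardware has completed: collect zero-sum results, run completion
 * callbacks, recycle slots and record the newest completed cookie.  The
 * walk stops at the descriptor currently loaded in the hardware channel.
 * Caller must hold iop_chan->lock.
 */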
105 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
106 {
107         struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
108         dma_cookie_t cookie = 0;
109         u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
110         int busy = iop_chan_is_busy(iop_chan);
111         int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
112
113         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
114         /* free completed slots from the chain starting with
115          * the oldest descriptor
116          */
117         list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
118                                         chain_node) {
119                 pr_debug("\tcookie: %d slot: %d busy: %d "
120                         "this_desc: %#x next_desc: %#llx ack: %d\n",
121                         iter->async_tx.cookie, iter->idx, busy,
122                         iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
123                         async_tx_test_ack(&iter->async_tx));
124                 prefetch(_iter);
125                 prefetch(&_iter->async_tx);
126
127                 /* do not advance past the current descriptor loaded into the
128                  * hardware channel, subsequent descriptors are either in
129                  * process or have not been submitted
130                  */
131                 if (seen_current)
132                         break;
133
134                 /* stop the search if we reach the current descriptor and the
135                  * channel is busy, or if it appears that the current descriptor
136                  * needs to be re-read (i.e. has been appended to)
137                  */
138                 if (iter->async_tx.phys == current_desc) {
139                         BUG_ON(seen_current++);
140                         if (busy || iop_desc_get_next_desc(iter))
141                                 break;
142                 }
143
144                 /* detect the start of a group transaction */
145                 if (!slot_cnt && !slots_per_op) {
146                         slot_cnt = iter->slot_cnt;
147                         slots_per_op = iter->slots_per_op;
148                         if (slot_cnt <= slots_per_op) {
149                                 slot_cnt = 0;
150                                 slots_per_op = 0;
151                         }
152                 }
153
154                 if (slot_cnt) {
155                         pr_debug("\tgroup++\n");
156                         if (!grp_start)
157                                 grp_start = iter;
158                         slot_cnt -= slots_per_op;
159                 }
160
161                 /* all the members of a group are complete */
162                 if (slots_per_op != 0 && slot_cnt == 0) {
163                         struct iop_adma_desc_slot *grp_iter, *_grp_iter;
164                         int end_of_chain = 0;
165                         pr_debug("\tgroup end\n");
166
167                         /* collect the total results */
168                         if (grp_start->xor_check_result) {
169                                 u32 zero_sum_result = 0;
170                                 slot_cnt = grp_start->slot_cnt;
171                                 grp_iter = grp_start;
172
173                                 list_for_each_entry_from(grp_iter,
174                                         &iop_chan->chain, chain_node) {
175                                         zero_sum_result |=
176                                             iop_desc_get_zero_result(grp_iter);
177                                         pr_debug("\titer%d result: %d\n",
178                                                  grp_iter->idx, zero_sum_result);
179                                         slot_cnt -= slots_per_op;
180                                         if (slot_cnt == 0)
181                                                 break;
182                                 }
183                                 pr_debug("\tgrp_start->xor_check_result: %p\n",
184                                         grp_start->xor_check_result);
185                                 *grp_start->xor_check_result = zero_sum_result;
186                         }
187
188                         /* clean up the group */
189                         slot_cnt = grp_start->slot_cnt;
190                         grp_iter = grp_start;
191                         list_for_each_entry_safe_from(grp_iter, _grp_iter,
192                                 &iop_chan->chain, chain_node) {
193                                 cookie = iop_adma_run_tx_complete_actions(
194                                         grp_iter, iop_chan, cookie);
195
196                                 slot_cnt -= slots_per_op;
197                                 end_of_chain = iop_adma_clean_slot(grp_iter,
198                                         iop_chan);
199
200                                 if (slot_cnt == 0 || end_of_chain)
201                                         break;
202                         }
203
204                         /* the group should be complete at this point */
205                         BUG_ON(slot_cnt);
206
207                         slots_per_op = 0;
208                         grp_start = NULL;
209                         if (end_of_chain)
210                                 break;
211                         else
212                                 continue;
213                 } else if (slots_per_op) /* wait for group completion */
214                         continue;
215
216                 /* write back zero sum results (single descriptor case) */
217                 if (iter->xor_check_result && iter->async_tx.cookie)
218                         *iter->xor_check_result =
219                                 iop_desc_get_zero_result(iter);
220
221                 cookie = iop_adma_run_tx_complete_actions(
222                                         iter, iop_chan, cookie);
223
224                 if (iop_adma_clean_slot(iter, iop_chan))
225                         break;
226         }
227
228         if (cookie > 0) {
229                 iop_chan->common.completed_cookie = cookie;
230                 pr_debug("\tcompleted cookie %d\n", cookie);
231         }
232 }
233
234 static void
235 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
236 {
237         spin_lock_bh(&iop_chan->lock);
238         __iop_adma_slot_cleanup(iop_chan);
239         spin_unlock_bh(&iop_chan->lock);
240 }
241
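/* bottom half scheduled by the interrupt handlers to reap completed work */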
242 static void iop_adma_tasklet(unsigned long data)
243 {
244         struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;
245
246         /* lockdep will flag dependency submissions as potentially
247          * recursive locking, this is not the case as a dependency
248          * submission will never recurse a channel's submit routine.
249          * There are checks in async_tx.c to prevent this.
250          */
251         spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
252         __iop_adma_slot_cleanup(iop_chan);
253         spin_unlock(&iop_chan->lock);
254 }
255
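/*
 * Allocate @num_slots contiguous descriptor slots for a single operation.
 * The search starts at iop_chan->last_used and is retried once from the
 * head of the slot list; if that also fails a direct reclaim pass is run
 * and NULL is returned.  On success the tail descriptor of the allocation
 * is returned with ->group_head pointing at the first slot.
 * Caller must hold iop_chan->lock.
 */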
256 static struct iop_adma_desc_slot *
257 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
258                         int slots_per_op)
259 {
260         struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
261         LIST_HEAD(chain);
262         int slots_found, retry = 0;
263
264         /* start the search from the last allocated descriptor;
265          * if a contiguous allocation cannot be found, start searching
266          * from the beginning of the list
267          */
268 retry:
269         slots_found = 0;
270         if (retry == 0)
271                 iter = iop_chan->last_used;
272         else
273                 iter = list_entry(&iop_chan->all_slots,
274                         struct iop_adma_desc_slot,
275                         slot_node);
276
277         list_for_each_entry_safe_continue(
278                 iter, _iter, &iop_chan->all_slots, slot_node) {
279                 prefetch(_iter);
280                 prefetch(&_iter->async_tx);
281                 if (iter->slots_per_op) {
282                         /* give up after finding the first busy slot
283                          * on the second pass through the list
284                          */
285                         if (retry)
286                                 break;
287
288                         slots_found = 0;
289                         continue;
290                 }
291
292                 /* start the allocation if the slot is correctly aligned */
293                 if (!slots_found++) {
294                         if (iop_desc_is_aligned(iter, slots_per_op))
295                                 alloc_start = iter;
296                         else {
297                                 slots_found = 0;
298                                 continue;
299                         }
300                 }
301
302                 if (slots_found == num_slots) {
303                         struct iop_adma_desc_slot *alloc_tail = NULL;
304                         struct iop_adma_desc_slot *last_used = NULL;
305                         iter = alloc_start;
306                         while (num_slots) {
307                                 int i;
308                                 dev_dbg(iop_chan->device->common.dev,
309                                         "allocated slot: %d "
310                                         "(desc %p phys: %#llx) slots_per_op %d\n",
311                                         iter->idx, iter->hw_desc,
312                                         (u64)iter->async_tx.phys, slots_per_op);
313
314                                 /* pre-ack all but the last descriptor */
315                                 if (num_slots != slots_per_op)
316                                         async_tx_ack(&iter->async_tx);
317
318                                 list_add_tail(&iter->chain_node, &chain);
319                                 alloc_tail = iter;
320                                 iter->async_tx.cookie = 0;
321                                 iter->slot_cnt = num_slots;
322                                 iter->xor_check_result = NULL;
323                                 for (i = 0; i < slots_per_op; i++) {
324                                         iter->slots_per_op = slots_per_op - i;
325                                         last_used = iter;
326                                         iter = list_entry(iter->slot_node.next,
327                                                 struct iop_adma_desc_slot,
328                                                 slot_node);
329                                 }
330                                 num_slots -= slots_per_op;
331                         }
332                         alloc_tail->group_head = alloc_start;
333                         alloc_tail->async_tx.cookie = -EBUSY;
334                         list_splice(&chain, &alloc_tail->tx_list);
335                         iop_chan->last_used = last_used;
336                         iop_desc_clear_next_desc(alloc_start);
337                         iop_desc_clear_next_desc(alloc_tail);
338                         return alloc_tail;
339                 }
340         }
341         if (!retry++)
342                 goto retry;
343
344         /* perform direct reclaim if the allocation fails */
345         __iop_adma_slot_cleanup(iop_chan);
346
347         return NULL;
348 }
349
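/* kick the hardware once IOP_ADMA_THRESHOLD descriptors are pending */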
350 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
351 {
352         dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
353                 iop_chan->pending);
354
355         if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
356                 iop_chan->pending = 0;
357                 iop_chan_append(iop_chan);
358         }
359 }
360
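/*
 * Append a prepared descriptor group to the channel's software chain and
 * link it into the hardware descriptor list.  Execution is deferred until
 * the pending count crosses the threshold or the client calls
 * issue_pending.
 */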
361 static dma_cookie_t
362 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
363 {
364         struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
365         struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
366         struct iop_adma_desc_slot *grp_start, *old_chain_tail;
367         int slot_cnt;
368         int slots_per_op;
369         dma_cookie_t cookie;
370         dma_addr_t next_dma;
371
372         grp_start = sw_desc->group_head;
373         slot_cnt = grp_start->slot_cnt;
374         slots_per_op = grp_start->slots_per_op;
375
376         spin_lock_bh(&iop_chan->lock);
377         cookie = dma_cookie_assign(tx);
378
379         old_chain_tail = list_entry(iop_chan->chain.prev,
380                 struct iop_adma_desc_slot, chain_node);
381         list_splice_init(&sw_desc->tx_list,
382                          &old_chain_tail->chain_node);
383
384         /* fix up the hardware chain */
385         next_dma = grp_start->async_tx.phys;
386         iop_desc_set_next_desc(old_chain_tail, next_dma);
387         BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
388
389         /* check for pre-chained descriptors */
390         iop_paranoia(iop_desc_get_next_desc(sw_desc));
391
392         /* increment the pending count by the number of slots.
393          * memcpy operations have a 1:1 (slot:operation) relation;
394          * other operations are heavier and will pop the threshold
395          * more often.
396          */
397         iop_chan->pending += slot_cnt;
398         iop_adma_check_threshold(iop_chan);
399         spin_unlock_bh(&iop_chan->lock);
400
401         dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
402                 __func__, sw_desc->async_tx.cookie, sw_desc->idx);
403
404         return cookie;
405 }
406
407 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
408 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
409
410 /**
411  * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
412  * @chan: allocate descriptor resources for this channel
413  *
414  * Note: We keep the slots for 1 operation on iop_chan->chain at all times.
415  * To avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
416  * greater than 2x the number of slots needed to satisfy a device->max_xor
417  * request.
418  */
420 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
421 {
422         char *hw_desc;
423         int idx;
424         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
425         struct iop_adma_desc_slot *slot = NULL;
426         int init = iop_chan->slots_allocated ? 0 : 1;
427         struct iop_adma_platform_data *plat_data =
428                 dev_get_platdata(&iop_chan->device->pdev->dev);
429         int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
430
431         /* Allocate descriptor slots */
432         do {
433                 idx = iop_chan->slots_allocated;
434                 if (idx == num_descs_in_pool)
435                         break;
436
437                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
438                 if (!slot) {
439                         printk(KERN_INFO "IOP ADMA Channel only initialized"
440                                 " %d descriptor slots\n", idx);
441                         break;
442                 }
443                 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
444                 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
445
446                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
447                 slot->async_tx.tx_submit = iop_adma_tx_submit;
448                 INIT_LIST_HEAD(&slot->tx_list);
449                 INIT_LIST_HEAD(&slot->chain_node);
450                 INIT_LIST_HEAD(&slot->slot_node);
451                 hw_desc = (char *) iop_chan->device->dma_desc_pool;
452                 slot->async_tx.phys =
453                         (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
454                 slot->idx = idx;
455
456                 spin_lock_bh(&iop_chan->lock);
457                 iop_chan->slots_allocated++;
458                 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
459                 spin_unlock_bh(&iop_chan->lock);
460         } while (iop_chan->slots_allocated < num_descs_in_pool);
461
462         if (idx && !iop_chan->last_used)
463                 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
464                                         struct iop_adma_desc_slot,
465                                         slot_node);
466
467         dev_dbg(iop_chan->device->common.dev,
468                 "allocated %d descriptor slots last_used: %p\n",
469                 iop_chan->slots_allocated, iop_chan->last_used);
470
471         /* initialize the channel and the chain with a null operation */
472         if (init) {
473                 if (dma_has_cap(DMA_MEMCPY,
474                         iop_chan->device->common.cap_mask))
475                         iop_chan_start_null_memcpy(iop_chan);
476                 else if (dma_has_cap(DMA_XOR,
477                         iop_chan->device->common.cap_mask))
478                         iop_chan_start_null_xor(iop_chan);
479                 else
480                         BUG();
481         }
482
483         return (idx > 0) ? idx : -ENOMEM;
484 }
485
486 static struct dma_async_tx_descriptor *
487 iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
488 {
489         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
490         struct iop_adma_desc_slot *sw_desc, *grp_start;
491         int slot_cnt, slots_per_op;
492
493         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
494
495         spin_lock_bh(&iop_chan->lock);
496         slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
497         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
498         if (sw_desc) {
499                 grp_start = sw_desc->group_head;
500                 iop_desc_init_interrupt(grp_start, iop_chan);
501                 sw_desc->async_tx.flags = flags;
502         }
503         spin_unlock_bh(&iop_chan->lock);
504
505         return sw_desc ? &sw_desc->async_tx : NULL;
506 }
507
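/* prepare a single-source copy of @len bytes from @dma_src to @dma_dest */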
508 static struct dma_async_tx_descriptor *
509 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
510                          dma_addr_t dma_src, size_t len, unsigned long flags)
511 {
512         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
513         struct iop_adma_desc_slot *sw_desc, *grp_start;
514         int slot_cnt, slots_per_op;
515
516         if (unlikely(!len))
517                 return NULL;
518         BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
519
520         dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
521                 __func__, len);
522
523         spin_lock_bh(&iop_chan->lock);
524         slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
525         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
526         if (sw_desc) {
527                 grp_start = sw_desc->group_head;
528                 iop_desc_init_memcpy(grp_start, flags);
529                 iop_desc_set_byte_count(grp_start, iop_chan, len);
530                 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
531                 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
532                 sw_desc->async_tx.flags = flags;
533         }
534         spin_unlock_bh(&iop_chan->lock);
535
536         return sw_desc ? &sw_desc->async_tx : NULL;
537 }
538
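/* prepare an xor of @src_cnt sources, @len bytes each, into @dma_dest */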
539 static struct dma_async_tx_descriptor *
540 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
541                       dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
542                       unsigned long flags)
543 {
544         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
545         struct iop_adma_desc_slot *sw_desc, *grp_start;
546         int slot_cnt, slots_per_op;
547
548         if (unlikely(!len))
549                 return NULL;
550         BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
551
552         dev_dbg(iop_chan->device->common.dev,
553                 "%s src_cnt: %d len: %zu flags: %lx\n",
554                 __func__, src_cnt, len, flags);
555
556         spin_lock_bh(&iop_chan->lock);
557         slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
558         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
559         if (sw_desc) {
560                 grp_start = sw_desc->group_head;
561                 iop_desc_init_xor(grp_start, src_cnt, flags);
562                 iop_desc_set_byte_count(grp_start, iop_chan, len);
563                 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
564                 sw_desc->async_tx.flags = flags;
565                 while (src_cnt--)
566                         iop_desc_set_xor_src_addr(grp_start, src_cnt,
567                                                   dma_src[src_cnt]);
568         }
569         spin_unlock_bh(&iop_chan->lock);
570
571         return sw_desc ? &sw_desc->async_tx : NULL;
572 }
573
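/*
 * Prepare a zero-sum check: xor @src_cnt sources of @len bytes and report
 * in *@result whether the result was non-zero.
 */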
574 static struct dma_async_tx_descriptor *
575 iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
576                           unsigned int src_cnt, size_t len, u32 *result,
577                           unsigned long flags)
578 {
579         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
580         struct iop_adma_desc_slot *sw_desc, *grp_start;
581         int slot_cnt, slots_per_op;
582
583         if (unlikely(!len))
584                 return NULL;
585
586         dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
587                 __func__, src_cnt, len);
588
589         spin_lock_bh(&iop_chan->lock);
590         slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
591         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
592         if (sw_desc) {
593                 grp_start = sw_desc->group_head;
594                 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
595                 iop_desc_set_zero_sum_byte_count(grp_start, len);
596                 grp_start->xor_check_result = result;
597                 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
598                         __func__, grp_start->xor_check_result);
599                 sw_desc->async_tx.flags = flags;
600                 while (src_cnt--)
601                         iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
602                                                        dma_src[src_cnt]);
603         }
604         spin_unlock_bh(&iop_chan->lock);
605
606         return sw_desc ? &sw_desc->async_tx : NULL;
607 }
608
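/*
 * Prepare a P+Q (RAID-6 syndrome) generation over @src_cnt sources.  When
 * a previous operation is being continued, the old P and Q values are
 * folded in as extra sources; see the dma_maxpq() comment in
 * include/linux/dmaengine.h.
 */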
609 static struct dma_async_tx_descriptor *
610 iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
611                      unsigned int src_cnt, const unsigned char *scf, size_t len,
612                      unsigned long flags)
613 {
614         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
615         struct iop_adma_desc_slot *sw_desc, *g;
616         int slot_cnt, slots_per_op;
617         int continue_srcs;
618
619         if (unlikely(!len))
620                 return NULL;
621         BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
622
623         dev_dbg(iop_chan->device->common.dev,
624                 "%s src_cnt: %d len: %zu flags: %lx\n",
625                 __func__, src_cnt, len, flags);
626
627         if (dmaf_p_disabled_continue(flags))
628                 continue_srcs = 1+src_cnt;
629         else if (dmaf_continue(flags))
630                 continue_srcs = 3+src_cnt;
631         else
632                 continue_srcs = 0+src_cnt;
633
634         spin_lock_bh(&iop_chan->lock);
635         slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
636         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
637         if (sw_desc) {
638                 int i;
639
640                 g = sw_desc->group_head;
641                 iop_desc_set_byte_count(g, iop_chan, len);
642
643                 /* even if P is disabled its destination address (bits
644                  * [3:0]) must match Q.  It is ok if P points to an
645                  * invalid address; it won't be written.
646                  */
647                 if (flags & DMA_PREP_PQ_DISABLE_P)
648                         dst[0] = dst[1] & 0x7;
649
650                 iop_desc_set_pq_addr(g, dst);
651                 sw_desc->async_tx.flags = flags;
652                 for (i = 0; i < src_cnt; i++)
653                         iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);
654
655                 /* if we are continuing a previous operation factor in
656                  * the old p and q values, see the comment for dma_maxpq
657                  * in include/linux/dmaengine.h
658                  */
659                 if (dmaf_p_disabled_continue(flags))
660                         iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
661                 else if (dmaf_continue(flags)) {
662                         iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
663                         iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
664                         iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
665                 }
666                 iop_desc_init_pq(g, i, flags);
667         }
668         spin_unlock_bh(&iop_chan->lock);
669
670         return sw_desc ? &sw_desc->async_tx : NULL;
671 }
672
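/*
 * Prepare a P+Q validate: recompute the syndrome over @src_cnt sources and
 * compare it with the P and Q buffers passed in @pq, reporting the outcome
 * in *@pqres.
 */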
673 static struct dma_async_tx_descriptor *
674 iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
675                          unsigned int src_cnt, const unsigned char *scf,
676                          size_t len, enum sum_check_flags *pqres,
677                          unsigned long flags)
678 {
679         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
680         struct iop_adma_desc_slot *sw_desc, *g;
681         int slot_cnt, slots_per_op;
682
683         if (unlikely(!len))
684                 return NULL;
685         BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
686
687         dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
688                 __func__, src_cnt, len);
689
690         spin_lock_bh(&iop_chan->lock);
691         slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
692         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
693         if (sw_desc) {
694                 /* for validate operations p and q are tagged onto the
695                  * end of the source list
696                  */
697                 int pq_idx = src_cnt;
698
699                 g = sw_desc->group_head;
700                 iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
701                 iop_desc_set_pq_zero_sum_byte_count(g, len);
702                 g->pq_check_result = pqres;
703                 pr_debug("\t%s: g->pq_check_result: %p\n",
704                         __func__, g->pq_check_result);
705                 sw_desc->async_tx.flags = flags;
706                 while (src_cnt--)
707                         iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
708                                                           src[src_cnt],
709                                                           scf[src_cnt]);
710                 iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
711         }
712         spin_unlock_bh(&iop_chan->lock);
713
714         return sw_desc ? &sw_desc->async_tx : NULL;
715 }
716
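/* reap any outstanding work, then tear down and free every descriptor slot */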
717 static void iop_adma_free_chan_resources(struct dma_chan *chan)
718 {
719         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
720         struct iop_adma_desc_slot *iter, *_iter;
721         int in_use_descs = 0;
722
723         iop_adma_slot_cleanup(iop_chan);
724
725         spin_lock_bh(&iop_chan->lock);
726         list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
727                                         chain_node) {
728                 in_use_descs++;
729                 list_del(&iter->chain_node);
730         }
731         list_for_each_entry_safe_reverse(
732                 iter, _iter, &iop_chan->all_slots, slot_node) {
733                 list_del(&iter->slot_node);
734                 kfree(iter);
735                 iop_chan->slots_allocated--;
736         }
737         iop_chan->last_used = NULL;
738
739         dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
740                 __func__, iop_chan->slots_allocated);
741         spin_unlock_bh(&iop_chan->lock);
742
743         /* one is ok since we left it there on purpose */
744         if (in_use_descs > 1)
745                 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
746                         in_use_descs - 1);
747 }
748
749 /**
750  * iop_adma_status - poll the status of an ADMA transaction
751  * @chan: ADMA channel handle
752  * @cookie: ADMA transaction identifier
753  * @txstate: a holder for the current state of the channel or NULL
754  */
755 static enum dma_status iop_adma_status(struct dma_chan *chan,
756                                         dma_cookie_t cookie,
757                                         struct dma_tx_state *txstate)
758 {
759         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
760         int ret;
761
762         ret = dma_cookie_status(chan, cookie, txstate);
763         if (ret == DMA_COMPLETE)
764                 return ret;
765
766         iop_adma_slot_cleanup(iop_chan);
767
768         return dma_cookie_status(chan, cookie, txstate);
769 }
770
771 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
772 {
773         struct iop_adma_chan *chan = data;
774
775         dev_dbg(chan->device->common.dev, "%s\n", __func__);
776
777         tasklet_schedule(&chan->irq_tasklet);
778
779         iop_adma_device_clear_eot_status(chan);
780
781         return IRQ_HANDLED;
782 }
783
784 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
785 {
786         struct iop_adma_chan *chan = data;
787
788         dev_dbg(chan->device->common.dev, "%s\n", __func__);
789
790         tasklet_schedule(&chan->irq_tasklet);
791
792         iop_adma_device_clear_eoc_status(chan);
793
794         return IRQ_HANDLED;
795 }
796
797 static irqreturn_t iop_adma_err_handler(int irq, void *data)
798 {
799         struct iop_adma_chan *chan = data;
800         unsigned long status = iop_chan_get_status(chan);
801
802         dev_err(chan->device->common.dev,
803                 "error ( %s%s%s%s%s%s%s)\n",
804                 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
805                 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
806                 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
807                 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
808                 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
809                 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
810                 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
811
812         iop_adma_device_clear_err_status(chan);
813
814         BUG();
815
816         return IRQ_HANDLED;
817 }
818
819 static void iop_adma_issue_pending(struct dma_chan *chan)
820 {
821         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
822
823         if (iop_chan->pending) {
824                 iop_chan->pending = 0;
825                 iop_chan_append(iop_chan);
826         }
827 }
828
829 /*
830  * Perform a transaction to verify the HW works.
831  */
832 #define IOP_ADMA_TEST_SIZE 2000
833
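/* push a known pattern through a hardware copy and compare it with the source */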
834 static int iop_adma_memcpy_self_test(struct iop_adma_device *device)
835 {
836         int i;
837         void *src, *dest;
838         dma_addr_t src_dma, dest_dma;
839         struct dma_chan *dma_chan;
840         dma_cookie_t cookie;
841         struct dma_async_tx_descriptor *tx;
842         int err = 0;
843         struct iop_adma_chan *iop_chan;
844
845         dev_dbg(device->common.dev, "%s\n", __func__);
846
847         src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
848         if (!src)
849                 return -ENOMEM;
850         dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
851         if (!dest) {
852                 kfree(src);
853                 return -ENOMEM;
854         }
855
856         /* Fill in src buffer */
857         for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
858                 ((u8 *) src)[i] = (u8)i;
859
860         /* Start copy, using first DMA channel */
861         dma_chan = container_of(device->common.channels.next,
862                                 struct dma_chan,
863                                 device_node);
864         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
865                 err = -ENODEV;
866                 goto out;
867         }
868
869         dest_dma = dma_map_single(dma_chan->device->dev, dest,
870                                 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
871         src_dma = dma_map_single(dma_chan->device->dev, src,
872                                 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
873         tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
874                                       IOP_ADMA_TEST_SIZE,
875                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
876
877         cookie = iop_adma_tx_submit(tx);
878         iop_adma_issue_pending(dma_chan);
879         msleep(1);
880
881         if (iop_adma_status(dma_chan, cookie, NULL) !=
882                         DMA_COMPLETE) {
883                 dev_err(dma_chan->device->dev,
884                         "Self-test copy timed out, disabling\n");
885                 err = -ENODEV;
886                 goto free_resources;
887         }
888
889         iop_chan = to_iop_adma_chan(dma_chan);
890         dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
891                 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
892         if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
893                 dev_err(dma_chan->device->dev,
894                         "Self-test copy failed compare, disabling\n");
895                 err = -ENODEV;
896                 goto free_resources;
897         }
898
899 free_resources:
900         iop_adma_free_chan_resources(dma_chan);
901 out:
902         kfree(src);
903         kfree(dest);
904         return err;
905 }
906
907 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
908 static int
909 iop_adma_xor_val_self_test(struct iop_adma_device *device)
910 {
911         int i, src_idx;
912         struct page *dest;
913         struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
914         struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
915         dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
916         dma_addr_t dest_dma;
917         struct dma_async_tx_descriptor *tx;
918         struct dma_chan *dma_chan;
919         dma_cookie_t cookie;
920         u8 cmp_byte = 0;
921         u32 cmp_word;
922         u32 zero_sum_result;
923         int err = 0;
924         struct iop_adma_chan *iop_chan;
925
926         dev_dbg(device->common.dev, "%s\n", __func__);
927
928         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
929                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
930                 if (!xor_srcs[src_idx]) {
931                         while (src_idx--)
932                                 __free_page(xor_srcs[src_idx]);
933                         return -ENOMEM;
934                 }
935         }
936
937         dest = alloc_page(GFP_KERNEL);
938         if (!dest) {
939                 while (src_idx--)
940                         __free_page(xor_srcs[src_idx]);
941                 return -ENOMEM;
942         }
943
944         /* Fill in src buffers */
945         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
946                 u8 *ptr = page_address(xor_srcs[src_idx]);
947                 for (i = 0; i < PAGE_SIZE; i++)
948                         ptr[i] = (1 << src_idx);
949         }
950
951         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
952                 cmp_byte ^= (u8) (1 << src_idx);
953
954         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
955                         (cmp_byte << 8) | cmp_byte;
956
957         memset(page_address(dest), 0, PAGE_SIZE);
958
959         dma_chan = container_of(device->common.channels.next,
960                                 struct dma_chan,
961                                 device_node);
962         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
963                 err = -ENODEV;
964                 goto out;
965         }
966
967         /* test xor */
968         dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
969                                 PAGE_SIZE, DMA_FROM_DEVICE);
970         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
971                 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
972                                            0, PAGE_SIZE, DMA_TO_DEVICE);
973         tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
974                                    IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
975                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
976
977         cookie = iop_adma_tx_submit(tx);
978         iop_adma_issue_pending(dma_chan);
979         msleep(8);
980
981         if (iop_adma_status(dma_chan, cookie, NULL) !=
982                 DMA_COMPLETE) {
983                 dev_err(dma_chan->device->dev,
984                         "Self-test xor timed out, disabling\n");
985                 err = -ENODEV;
986                 goto free_resources;
987         }
988
989         iop_chan = to_iop_adma_chan(dma_chan);
990         dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
991                 PAGE_SIZE, DMA_FROM_DEVICE);
992         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
993                 u32 *ptr = page_address(dest);
994                 if (ptr[i] != cmp_word) {
995                         dev_err(dma_chan->device->dev,
996                                 "Self-test xor failed compare, disabling\n");
997                         err = -ENODEV;
998                         goto free_resources;
999                 }
1000         }
1001         dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
1002                 PAGE_SIZE, DMA_TO_DEVICE);
1003
1004         /* skip zero sum if the capability is not present */
1005         if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1006                 goto free_resources;
1007
1008         /* zero sum the sources with the destination page */
1009         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1010                 zero_sum_srcs[i] = xor_srcs[i];
1011         zero_sum_srcs[i] = dest;
1012
1013         zero_sum_result = 1;
1014
1015         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1016                 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1017                                            zero_sum_srcs[i], 0, PAGE_SIZE,
1018                                            DMA_TO_DEVICE);
1019         tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1020                                        IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1021                                        &zero_sum_result,
1022                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1023
1024         cookie = iop_adma_tx_submit(tx);
1025         iop_adma_issue_pending(dma_chan);
1026         msleep(8);
1027
1028         if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1029                 dev_err(dma_chan->device->dev,
1030                         "Self-test zero sum timed out, disabling\n");
1031                 err = -ENODEV;
1032                 goto free_resources;
1033         }
1034
1035         if (zero_sum_result != 0) {
1036                 dev_err(dma_chan->device->dev,
1037                         "Self-test zero sum failed compare, disabling\n");
1038                 err = -ENODEV;
1039                 goto free_resources;
1040         }
1041
1042         /* test for non-zero parity sum */
1043         zero_sum_result = 0;
1044         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1045                 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1046                                            zero_sum_srcs[i], 0, PAGE_SIZE,
1047                                            DMA_TO_DEVICE);
1048         tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
1049                                        IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1050                                        &zero_sum_result,
1051                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1052
1053         cookie = iop_adma_tx_submit(tx);
1054         iop_adma_issue_pending(dma_chan);
1055         msleep(8);
1056
1057         if (iop_adma_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
1058                 dev_err(dma_chan->device->dev,
1059                         "Self-test non-zero sum timed out, disabling\n");
1060                 err = -ENODEV;
1061                 goto free_resources;
1062         }
1063
1064         if (zero_sum_result != 1) {
1065                 dev_err(dma_chan->device->dev,
1066                         "Self-test non-zero sum failed compare, disabling\n");
1067                 err = -ENODEV;
1068                 goto free_resources;
1069         }
1070
1071 free_resources:
1072         iop_adma_free_chan_resources(dma_chan);
1073 out:
1074         src_idx = IOP_ADMA_NUM_SRC_TEST;
1075         while (src_idx--)
1076                 __free_page(xor_srcs[src_idx]);
1077         __free_page(dest);
1078         return err;
1079 }
1080
1081 #ifdef CONFIG_RAID6_PQ
1082 static int
1083 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1084 {
1085         /* combined sources, software pq results, and extra hw pq results */
1086         struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
1087         /* ptr to the extra hw pq buffers defined above */
1088         struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1089         /* address conversion buffers (dma_map / page_address) */
1090         void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1091         dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1092         dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1093
1094         int i;
1095         struct dma_async_tx_descriptor *tx;
1096         struct dma_chan *dma_chan;
1097         dma_cookie_t cookie;
1098         u32 zero_sum_result;
1099         int err = 0;
1100         struct device *dev;
1101
1102         dev_dbg(device->common.dev, "%s\n", __func__);
1103
1104         for (i = 0; i < ARRAY_SIZE(pq); i++) {
1105                 pq[i] = alloc_page(GFP_KERNEL);
1106                 if (!pq[i]) {
1107                         while (i--)
1108                                 __free_page(pq[i]);
1109                         return -ENOMEM;
1110                 }
1111         }
1112
1113         /* Fill in src buffers */
1114         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
1115                 pq_sw[i] = page_address(pq[i]);
1116                 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1117         }
1118         pq_sw[i] = page_address(pq[i]);
1119         pq_sw[i+1] = page_address(pq[i+1]);
1120
1121         dma_chan = container_of(device->common.channels.next,
1122                                 struct dma_chan,
1123                                 device_node);
1124         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1125                 err = -ENODEV;
1126                 goto out;
1127         }
1128
1129         dev = dma_chan->device->dev;
1130
1131         /* initialize the dests */
1132         memset(page_address(pq_hw[0]), 0, PAGE_SIZE);
1133         memset(page_address(pq_hw[1]), 0, PAGE_SIZE);
1134
1135         /* test pq */
1136         pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1137         pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1138         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1139                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1140                                          DMA_TO_DEVICE);
1141
1142         tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1143                                   IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1144                                   PAGE_SIZE,
1145                                   DMA_PREP_INTERRUPT |
1146                                   DMA_CTRL_ACK);
1147
1148         cookie = iop_adma_tx_submit(tx);
1149         iop_adma_issue_pending(dma_chan);
1150         msleep(8);
1151
1152         if (iop_adma_status(dma_chan, cookie, NULL) !=
1153                 DMA_COMPLETE) {
1154                 dev_err(dev, "Self-test pq timed out, disabling\n");
1155                 err = -ENODEV;
1156                 goto free_resources;
1157         }
1158
1159         raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1160
1161         if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1162                    page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1163                 dev_err(dev, "Self-test p failed compare, disabling\n");
1164                 err = -ENODEV;
1165                 goto free_resources;
1166         }
1167         if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1168                    page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1169                 dev_err(dev, "Self-test q failed compare, disabling\n");
1170                 err = -ENODEV;
1171                 goto free_resources;
1172         }
1173
1174         /* test correct zero sum using the software generated pq values */
1175         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1176                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1177                                          DMA_TO_DEVICE);
1178
1179         zero_sum_result = ~0;
1180         tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1181                                       pq_src, IOP_ADMA_NUM_SRC_TEST,
1182                                       raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1183                                       DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1184
1185         cookie = iop_adma_tx_submit(tx);
1186         iop_adma_issue_pending(dma_chan);
1187         msleep(8);
1188
1189         if (iop_adma_status(dma_chan, cookie, NULL) !=
1190                 DMA_COMPLETE) {
1191                 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1192                 err = -ENODEV;
1193                 goto free_resources;
1194         }
1195
1196         if (zero_sum_result != 0) {
1197                 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1198                         zero_sum_result);
1199                 err = -ENODEV;
1200                 goto free_resources;
1201         }
1202
1203         /* test incorrect zero sum */
1204         i = IOP_ADMA_NUM_SRC_TEST;
1205         memset(pq_sw[i] + 100, 0, 100);
1206         memset(pq_sw[i+1] + 200, 0, 200);
1207         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1208                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1209                                          DMA_TO_DEVICE);
1210
1211         zero_sum_result = 0;
1212         tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1213                                       pq_src, IOP_ADMA_NUM_SRC_TEST,
1214                                       raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1215                                       DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1216
1217         cookie = iop_adma_tx_submit(tx);
1218         iop_adma_issue_pending(dma_chan);
1219         msleep(8);
1220
1221         if (iop_adma_status(dma_chan, cookie, NULL) !=
1222                 DMA_COMPLETE) {
1223                 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1224                 err = -ENODEV;
1225                 goto free_resources;
1226         }
1227
1228         if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1229                 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1230                         zero_sum_result);
1231                 err = -ENODEV;
1232                 goto free_resources;
1233         }
1234
1235 free_resources:
1236         iop_adma_free_chan_resources(dma_chan);
1237 out:
1238         i = ARRAY_SIZE(pq);
1239         while (i--)
1240                 __free_page(pq[i]);
1241         return err;
1242 }
1243 #endif
1244
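/* unregister the dma device, free the descriptor pool and the channel state */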
1245 static int iop_adma_remove(struct platform_device *dev)
1246 {
1247         struct iop_adma_device *device = platform_get_drvdata(dev);
1248         struct dma_chan *chan, *_chan;
1249         struct iop_adma_chan *iop_chan;
1250         struct iop_adma_platform_data *plat_data = dev_get_platdata(&dev->dev);
1251
1252         dma_async_device_unregister(&device->common);
1253
1254         dma_free_coherent(&dev->dev, plat_data->pool_size,
1255                         device->dma_desc_pool_virt, device->dma_desc_pool);
1256
1257         list_for_each_entry_safe(chan, _chan, &device->common.channels,
1258                                 device_node) {
1259                 iop_chan = to_iop_adma_chan(chan);
1260                 list_del(&chan->device_node);
1261                 kfree(iop_chan);
1262         }
1263         kfree(device);
1264
1265         return 0;
1266 }
1267
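/*
 * Probe one ADMA engine: map its registers, allocate the coherent
 * descriptor pool, wire up the dmaengine operations advertised in the
 * platform data, hook up the three interrupts and run the self tests
 * before registering the device.
 */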
1268 static int iop_adma_probe(struct platform_device *pdev)
1269 {
1270         struct resource *res;
1271         int ret = 0, i;
1272         struct iop_adma_device *adev;
1273         struct iop_adma_chan *iop_chan;
1274         struct dma_device *dma_dev;
1275         struct iop_adma_platform_data *plat_data = dev_get_platdata(&pdev->dev);
1276
1277         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1278         if (!res)
1279                 return -ENODEV;
1280
1281         if (!devm_request_mem_region(&pdev->dev, res->start,
1282                                 resource_size(res), pdev->name))
1283                 return -EBUSY;
1284
1285         adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1286         if (!adev)
1287                 return -ENOMEM;
1288         dma_dev = &adev->common;
1289
1290         /* allocate coherent memory for hardware descriptors.
1291          * Note: writecombine gives slightly better performance, but
1292          * requires that we explicitly flush the writes
1293          */
1294         adev->dma_desc_pool_virt = dma_alloc_wc(&pdev->dev,
1295                                                 plat_data->pool_size,
1296                                                 &adev->dma_desc_pool,
1297                                                 GFP_KERNEL);
1298         if (!adev->dma_desc_pool_virt) {
1299                 ret = -ENOMEM;
1300                 goto err_free_adev;
1301         }
1302
1303         dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1304                 __func__, adev->dma_desc_pool_virt,
1305                 (void *) adev->dma_desc_pool);
1306
1307         adev->id = plat_data->hw_id;
1308
1309         /* discover transaction capabilities from the platform data */
1310         dma_dev->cap_mask = plat_data->cap_mask;
1311
1312         adev->pdev = pdev;
1313         platform_set_drvdata(pdev, adev);
1314
1315         INIT_LIST_HEAD(&dma_dev->channels);
1316
1317         /* set base routines */
1318         dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1319         dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1320         dma_dev->device_tx_status = iop_adma_status;
1321         dma_dev->device_issue_pending = iop_adma_issue_pending;
1322         dma_dev->dev = &pdev->dev;
1323
1324         /* set prep routines based on capability */
1325         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1326                 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1327         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1328                 dma_dev->max_xor = iop_adma_get_max_xor();
1329                 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1330         }
1331         if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1332                 dma_dev->device_prep_dma_xor_val =
1333                         iop_adma_prep_dma_xor_val;
1334         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1335                 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1336                 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1337         }
1338         if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1339                 dma_dev->device_prep_dma_pq_val =
1340                         iop_adma_prep_dma_pq_val;
1341         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1342                 dma_dev->device_prep_dma_interrupt =
1343                         iop_adma_prep_dma_interrupt;
1344
1345         iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1346         if (!iop_chan) {
1347                 ret = -ENOMEM;
1348                 goto err_free_dma;
1349         }
1350         iop_chan->device = adev;
1351
1352         iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1353                                         resource_size(res));
1354         if (!iop_chan->mmr_base) {
1355                 ret = -ENOMEM;
1356                 goto err_free_iop_chan;
1357         }
1358         tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1359                 iop_chan);
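        /* the irq handlers below presumably schedule this tasklet so that
         * descriptor clean-up runs outside of hard interrupt context
         */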
1360
1361         /* clear errors before enabling interrupts */
1362         iop_adma_device_clear_err_status(iop_chan);
1363
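        /* the three platform IRQ resources are expected to map, in order, to
         * the end-of-transfer, end-of-chain and error interrupts serviced by
         * handler[] below
         */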
1364         for (i = 0; i < 3; i++) {
1365                 irq_handler_t handler[] = { iop_adma_eot_handler,
1366                                         iop_adma_eoc_handler,
1367                                         iop_adma_err_handler };
1368                 int irq = platform_get_irq(pdev, i);
1369                 if (irq < 0) {
1370                         ret = -ENXIO;
1371                         goto err_free_iop_chan;
1372                 } else {
1373                         ret = devm_request_irq(&pdev->dev, irq,
1374                                         handler[i], 0, pdev->name, iop_chan);
1375                         if (ret)
1376                                 goto err_free_iop_chan;
1377                 }
1378         }
1379
1380         spin_lock_init(&iop_chan->lock);
1381         INIT_LIST_HEAD(&iop_chan->chain);
1382         INIT_LIST_HEAD(&iop_chan->all_slots);
1383         iop_chan->common.device = dma_dev;
1384         dma_cookie_init(&iop_chan->common);
1385         list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1386
1387         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1388                 ret = iop_adma_memcpy_self_test(adev);
1389                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1390                 if (ret)
1391                         goto err_free_iop_chan;
1392         }
1393
1394         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1395                 ret = iop_adma_xor_val_self_test(adev);
1396                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1397                 if (ret)
1398                         goto err_free_iop_chan;
1399         }
1400
1401         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1402             dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1403                 #ifdef CONFIG_RAID6_PQ
1404                 ret = iop_adma_pq_zero_sum_self_test(adev);
1405                 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1406                 #else
1407                 /* cannot test raid6, so do not publish the capability */
1408                 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1409                 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1410                 ret = 0;
1411                 #endif
1412                 if (ret)
1413                         goto err_free_iop_chan;
1414         }
1415
1416         dev_info(&pdev->dev, "Intel(R) IOP: ( %s%s%s%s%s%s)\n",
1417                  dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1418                  dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1419                  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1420                  dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1421                  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1422                  dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1423
1424         dma_async_device_register(dma_dev);
1425         goto out;
1426
1427  err_free_iop_chan:
1428         kfree(iop_chan);
1429  err_free_dma:
1430         dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1431                         adev->dma_desc_pool_virt, adev->dma_desc_pool);
1432  err_free_adev:
1433         kfree(adev);
1434  out:
1435         return ret;
1436 }
1437
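/*
 * iop_chan_start_null_memcpy - appears to prime an idle channel with a
 * zero-length memcpy descriptor so the engine has a valid chain head that
 * later descriptors can be appended to via the next-descriptor pointer
 */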
1438 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1439 {
1440         struct iop_adma_desc_slot *sw_desc, *grp_start;
1441         dma_cookie_t cookie;
1442         int slot_cnt, slots_per_op;
1443
1444         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1445
1446         spin_lock_bh(&iop_chan->lock);
1447         slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1448         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1449         if (sw_desc) {
1450                 grp_start = sw_desc->group_head;
1451
1452                 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1453                 async_tx_ack(&sw_desc->async_tx);
1454                 iop_desc_init_memcpy(grp_start, 0);
1455                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1456                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1457                 iop_desc_set_memcpy_src_addr(grp_start, 0);
1458
1459                 cookie = dma_cookie_assign(&sw_desc->async_tx);
1460
1461                 /* initialize the completed cookie to be less than
1462                  * the most recently used cookie
1463                  */
1464                 iop_chan->common.completed_cookie = cookie - 1;
1465
1466                 /* channel should not be busy */
1467                 BUG_ON(iop_chan_is_busy(iop_chan));
1468
1469                 /* clear any prior error-status bits */
1470                 iop_adma_device_clear_err_status(iop_chan);
1471
1472                 /* disable operation */
1473                 iop_chan_disable(iop_chan);
1474
1475                 /* set the descriptor address */
1476                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1477
1478                 /* 1/ verify no pre-chained descriptors were added;
1479                  * 2/ the BUG_ON read doubles as a dummy read to flush the next_desc write
1480                  */
1481                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1482
1483                 /* run the descriptor */
1484                 iop_chan_enable(iop_chan);
1485         } else
1486                 dev_err(iop_chan->device->common.dev,
1487                         "failed to allocate null descriptor\n");
1488         spin_unlock_bh(&iop_chan->lock);
1489 }
1490
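/*
 * iop_chan_start_null_xor - same priming scheme as iop_chan_start_null_memcpy
 * above, but built around a zero-length, two-source XOR descriptor
 */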
1491 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1492 {
1493         struct iop_adma_desc_slot *sw_desc, *grp_start;
1494         dma_cookie_t cookie;
1495         int slot_cnt, slots_per_op;
1496
1497         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1498
1499         spin_lock_bh(&iop_chan->lock);
1500         slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1501         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1502         if (sw_desc) {
1503                 grp_start = sw_desc->group_head;
1504                 list_splice_init(&sw_desc->tx_list, &iop_chan->chain);
1505                 async_tx_ack(&sw_desc->async_tx);
1506                 iop_desc_init_null_xor(grp_start, 2, 0);
1507                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1508                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1509                 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1510                 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1511
1512                 cookie = dma_cookie_assign(&sw_desc->async_tx);
1513
1514                 /* initialize the completed cookie to be less than
1515                  * the most recently used cookie
1516                  */
1517                 iop_chan->common.completed_cookie = cookie - 1;
1518
1519                 /* channel should not be busy */
1520                 BUG_ON(iop_chan_is_busy(iop_chan));
1521
1522                 /* clear any prior error-status bits */
1523                 iop_adma_device_clear_err_status(iop_chan);
1524
1525                 /* disable operation */
1526                 iop_chan_disable(iop_chan);
1527
1528                 /* set the descriptor address */
1529                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1530
1531                 /* 1/ verify no pre-chained descriptors were added;
1532                  * 2/ the BUG_ON read doubles as a dummy read to flush the next_desc write
1533                  */
1534                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1535
1536                 /* run the descriptor */
1537                 iop_chan_enable(iop_chan);
1538         } else
1539                 dev_err(iop_chan->device->common.dev,
1540                         "failed to allocate null descriptor\n");
1541         spin_unlock_bh(&iop_chan->lock);
1542 }
1543
1544 static struct platform_driver iop_adma_driver = {
1545         .probe          = iop_adma_probe,
1546         .remove         = iop_adma_remove,
1547         .driver         = {
1548                 .name   = "iop-adma",
1549         },
1550 };
1551
1552 module_platform_driver(iop_adma_driver);
1553
1554 MODULE_AUTHOR("Intel Corporation");
1555 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1556 MODULE_LICENSE("GPL");
1557 MODULE_ALIAS("platform:iop-adma");