/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * memory copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>

#include "registers.h"
#include "../dmaengine.h"
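
/*
 * Human-readable decode of the channel error (CHANERR) register:
 * ioat_print_chanerrs() below reports chanerr_str[i] for each bit i that
 * is set in the register value it is handed.
 */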
static char *chanerr_str[] = {
        "DMA Transfer Source Address Error",
        "DMA Transfer Destination Address Error",
        "Next Descriptor Address Error",
        "Chan Address Value Error",
        "Chipset Uncorrectable Data Integrity Error",
        "DMA Uncorrectable Data Integrity Error",
        "Descriptor Control Error",
        "Descriptor Transfer Size Error",
        "Completion Address Error",
        "Interrupt Configuration Error",
        "Super extended descriptor Address Error",
        "Descriptor Count Error",
        "DIF All F detect Error",
        "Guard Tag verification Error",
        "Application Tag verification Error",
        "Reference Tag verification Error",
        "Result DIF All F detect Error",
        "Result Guard Tag verification Error",
        "Result Application Tag verification Error",
        "Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
                if ((chanerr >> i) & 1) {
                        dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
                                i, chanerr_str[i]);
                }
        }
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
        struct ioatdma_device *instance = data;
        struct ioatdma_chan *ioat_chan;
        unsigned long attnstatus;

        intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

        if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))

        if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
                writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);

        attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
        for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
                ioat_chan = ioat_chan_by_index(instance, bit);
                if (test_bit(IOAT_RUN, &ioat_chan->state))
                        tasklet_schedule(&ioat_chan->cleanup_task);

        writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
        struct ioatdma_chan *ioat_chan = data;

        if (test_bit(IOAT_RUN, &ioat_chan->state))
                tasklet_schedule(&ioat_chan->cleanup_task);
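
/*
 * ioat_stop - quiesce a channel: mark it as not running, make sure any
 * in-flight interrupt, timer and tasklet work has finished, then run one
 * final cleanup pass over the ring.
 */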
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct pci_dev *pdev = ioat_dma->pdev;
        int chan_id = chan_num(ioat_chan);
        struct msix_entry *msix;

        /* 1/ stop irq from firing tasklets
         * 2/ stop the tasklet from re-arming irqs
         */
        clear_bit(IOAT_RUN, &ioat_chan->state);

        /* flush inflight interrupts */
        switch (ioat_dma->irq_mode) {
                msix = &ioat_dma->msix_entries[chan_id];
                synchronize_irq(msix->vector);
                synchronize_irq(pdev->irq);

        /* flush inflight timers */
        del_timer_sync(&ioat_chan->timer);

        /* flush inflight tasklet runs */
        tasklet_kill(&ioat_chan->cleanup_task);

        /* final cleanup now that everything is quiesced and can't re-arm */
        ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
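
/*
 * Writing the updated descriptor count to the DMACOUNT register tells the
 * hardware how far it may fetch; "issued" is advanced to match "head" so
 * the newly prepared descriptors become visible to the engine.
 */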
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
        ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
        ioat_chan->issued = ioat_chan->head;
        writew(ioat_chan->dmacount,
               ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail,
                ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

        if (ioat_ring_pending(ioat_chan)) {
                spin_lock_bh(&ioat_chan->prep_lock);
                __ioat_issue_pending(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
        }
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
        if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
                __ioat_issue_pending(ioat_chan);
}
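
/*
 * A NULL descriptor moves no data; it is used to (re)start the channel.
 * int_en and compl_write make it raise an interrupt and post a completion
 * write so progress can be observed, and the chain address register is
 * pointed at it before the descriptor is issued.
 */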
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat_ring_space(ioat_chan) < 1) {
                dev_err(to_dev(ioat_chan),
                        "Unable to start null desc - ring full\n");

        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;

        async_tx_ack(&desc->txd);
        ioat_set_chainaddr(ioat_chan, desc->txd.phys);
        dump_desc_dbg(ioat_chan, desc);
        /* make sure descriptors are written before we submit */
        ioat_chan->head += 1;
        __ioat_issue_pending(ioat_chan);

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
        spin_lock_bh(&ioat_chan->prep_lock);
        if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
                __ioat_start_null_desc(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);
}
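
/*
 * Restart after an error or suspend: rewind "issued" back to "tail", zero
 * the in-flight count, and re-point the hardware chain address at the first
 * not-yet-completed descriptor (or kick a NULL descriptor if the ring is
 * empty) before re-issuing.
 */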
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
        /* set the tail to be re-issued */
        ioat_chan->issued = ioat_chan->tail;
        ioat_chan->dmacount = 0;
        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        dev_dbg(to_dev(ioat_chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail,
                ioat_chan->issued, ioat_chan->dmacount);

        if (ioat_ring_pending(ioat_chan)) {
                struct ioat_ring_ent *desc;

                desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
                ioat_set_chainaddr(ioat_chan, desc->txd.phys);
                __ioat_issue_pending(ioat_chan);
        } else
                __ioat_start_null_desc(ioat_chan);
}
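
/*
 * ioat_quiesce - suspend the channel and poll CHANSTS until it is no longer
 * active or idle; a timeout of 0 means wait without a time limit.
 */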
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;

        status = ioat_chansts(ioat_chan);
        if (is_ioat_active(status) || is_ioat_idle(status))
                ioat_suspend(ioat_chan);
        while (is_ioat_active(status) || is_ioat_idle(status)) {
                if (tmo && time_after(jiffies, end)) {

                status = ioat_chansts(ioat_chan);

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
        unsigned long end = jiffies + tmo;

        ioat_reset(ioat_chan);
        while (ioat_reset_pending(ioat_chan)) {
                if (end && time_after(jiffies, end)) {
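
/*
 * tx_submit callback: assigns the dmaengine cookie, arms the completion
 * timer on first use, publishes the prepared descriptors by advancing
 * "head", and releases the prep_lock taken in ioat_check_space_lock().
 */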
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
        __releases(&ioat_chan->prep_lock)
{
        struct dma_chan *c = tx->chan;
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

        cookie = dma_cookie_assign(tx);
        dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

        if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        /* make descriptor updates visible before advancing ioat->head,
         * this is purposefully not smp_wmb() since we are also
         * publishing the descriptor updates to a dma device
         */
        ioat_chan->head += ioat_chan->produce;

        ioat_update_pending(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);
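
/*
 * Ring entries are not allocated one at a time from a DMA pool: the
 * hardware descriptor is carved out of a 2MB coherent chunk (chunk and
 * offset derived from the ring index), while the software state comes
 * from the ioat_cache slab.
 */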
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);

        chunk = idx / IOAT_DESCS_PER_2M;
        idx &= (IOAT_DESCS_PER_2M - 1);
        offs = idx * IOAT_DESC_SZ;
        pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
        phys = ioat_chan->descs[chunk].hw + offs;
        hw = (struct ioat_dma_descriptor *)pos;
        memset(hw, 0, sizeof(*hw));

        desc = kmem_cache_zalloc(ioat_cache, flags);

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_submit = ioat_tx_submit_unlock;
        desc->txd.phys = phys;

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
        kmem_cache_free(ioat_cache, desc);
}
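
/*
 * Allocate the software ring (array of ioat_ring_ent pointers) plus the
 * 2MB DMA-coherent chunks backing the hardware descriptors, then link each
 * hardware descriptor's "next" pointer to form a circular chain. On
 * engines with the DPS capability, descriptor prefetching is also enabled.
 */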
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent **ring;
        int total_descs = 1 << order;

        /* allocate the array to hold the software ring */
        ring = kcalloc(total_descs, sizeof(*ring), flags);

        ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

        for (i = 0; i < chunks; i++) {
                struct ioat_descs *descs = &ioat_chan->descs[i];

                descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
                                                 SZ_2M, &descs->hw, flags);
                if (!descs->virt && (i > 0)) {

                        for (idx = 0; idx < i; idx++) {
                                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
                                                  descs->virt, descs->hw);

                        ioat_chan->desc_chunks = 0;

        for (i = 0; i < total_descs; i++) {
                ring[i] = ioat_alloc_ring_ent(c, i, flags);

                        ioat_free_ring_ent(ring[i], c);

                        for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
                                dma_free_coherent(to_dev(ioat_chan),
                                                  SZ_2M,
                                                  ioat_chan->descs[idx].virt,
                                                  ioat_chan->descs[idx].hw);
                                ioat_chan->descs[idx].virt = NULL;
                                ioat_chan->descs[idx].hw = 0;

                        ioat_chan->desc_chunks = 0;

                set_desc_id(ring[i], i);

        for (i = 0; i < total_descs-1; i++) {
                struct ioat_ring_ent *next = ring[i+1];
                struct ioat_dma_descriptor *hw = ring[i]->hw;

                hw->next = next->txd.phys;
        }
        ring[i]->hw->next = ring[0]->txd.phys;

        /* setup descriptor pre-fetching for v3.4 */
        if (ioat_dma->cap & IOAT_CAP_DPS) {
                u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

                        drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

                writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
        __acquires(&ioat_chan->prep_lock)
{
        spin_lock_bh(&ioat_chan->prep_lock);
        /* never allow the last descriptor to be consumed, we need at
         * least one free at all times to allow for on-the-fly ring
         * resizing.
         */
        if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
                dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
                        __func__, num_descs, ioat_chan->head,
                        ioat_chan->tail, ioat_chan->issued);
                ioat_chan->produce = num_descs;
                return 0;  /* with ioat->prep_lock held */
        }
        spin_unlock_bh(&ioat_chan->prep_lock);

        dev_dbg_ratelimited(to_dev(ioat_chan),
                            "%s: ring full! num_descs: %d (%x:%x:%x)\n",
                            __func__, num_descs, ioat_chan->head,
                            ioat_chan->tail, ioat_chan->issued);

        /* progress reclaim in the allocation failure case we may be
         * called under bh_disabled so we need to trigger the timer
         * event directly
         */
        if (time_is_before_jiffies(ioat_chan->timer.expires)
            && timer_pending(&ioat_chan->timer)) {
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
                ioat_timer_event(&ioat_chan->timer);
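
/*
 * XOR and PQ operations with many sources spill into a second ("extended")
 * descriptor slot; cleanup uses this helper to know when an extra slot
 * must be skipped.
 */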
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        if (hw->ctl_f.op == IOAT_OP_XOR ||
            hw->ctl_f.op == IOAT_OP_XOR_VAL) {
                struct ioat_xor_descriptor *xor = desc->xor;

                if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
        } else if (hw->ctl_f.op == IOAT_OP_PQ ||
                   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
                struct ioat_pq_descriptor *pq = desc->pq;

                if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
        dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
        kmem_cache_free(ioat_sed_cache, sed);
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
        completion = *ioat_chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        return phys_complete;

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
                                  u64 *phys_complete)
{
        *phys_complete = ioat_get_current_completion(ioat_chan);
        if (*phys_complete == ioat_chan->last_completion)

        clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        switch (hw->ctl_f.op) {
        case IOAT_OP_PQ_VAL_16S:
                struct ioat_pq_descriptor *pq = desc->pq;

                /* check if there's an error written */
                if (!pq->dwbes_f.wbes)

                /* need to set a chanerr var for checking to clear later */

                if (pq->dwbes_f.p_val_err)
                        *desc->result |= SUM_CHECK_P_RESULT;

                if (pq->dwbes_f.q_val_err)
                        *desc->result |= SUM_CHECK_Q_RESULT;
/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        int idx = ioat_chan->tail, i;

        dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

        /*
         * At restart of the channel, the completion address and the
         * channel status will be 0 due to starting a new chain. Since
         * it's new chain and the first descriptor "fails", there is
         * nothing to clean up. We do not want to reap the entire submitted
         * chain due to this 0 address value and then BUG.
         */

        active = ioat_ring_active(ioat_chan);
        for (i = 0; i < active && !seen_current; i++) {
                struct dma_async_tx_descriptor *tx;

                prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
                desc = ioat_get_ring_ent(ioat_chan, idx + i);
                dump_desc_dbg(ioat_chan, desc);

                /* set err stat if we are using dwbes */
                if (ioat_dma->cap & IOAT_CAP_DWBES)
                        desc_get_errstat(ioat_chan, desc);

                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, NULL);
                tx->callback_result = NULL;

                if (tx->phys == phys_complete)

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        BUG_ON(i + 1 >= active);

                /* cleanup super extended descriptors */
                        ioat_free_sed(ioat_dma, desc->sed);

        /* finish all descriptor reads before incrementing tail */
        ioat_chan->tail = idx + i;
        /* no active descs have written a completion? */
        BUG_ON(active && !seen_current);
        ioat_chan->last_completion = phys_complete;

        if (active - i == 0) {
                dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
                        __func__);
                mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
        }

        /* microsecond delay by sysfs variable per pending descriptor */
        if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
                writew(min((ioat_chan->intr_coalesce * (active - i)),
                           IOAT_INTRDELAY_MASK),
                       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
                ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
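
/*
 * Normal cleanup path, run from the cleanup tasklet: reap completed
 * descriptors under cleanup_lock, and check the completion writeback for a
 * halted channel so that CHANERR conditions covered by the handle/recover
 * masks get dealt with.
 */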
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
        spin_lock_bh(&ioat_chan->cleanup_lock);

        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        if (is_ioat_halted(*ioat_chan->completion)) {
                u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

                if (chanerr &
                    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
                        mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);

        spin_unlock_bh(&ioat_chan->cleanup_lock);

void ioat_cleanup_event(unsigned long data)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

        ioat_cleanup(ioat_chan);
        if (!test_bit(IOAT_RUN, &ioat_chan->state))

        writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
        /* set the completion address register again */
        writel(lower_32_bits(ioat_chan->completion_dma),
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(upper_32_bits(ioat_chan->completion_dma),
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        ioat_quiesce(ioat_chan, 0);
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        __ioat_restart_chan(ioat_chan);
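
/*
 * After a fatal error, complete every remaining in-flight descriptor with
 * DMA_TRANS_ABORTED (the failed descriptor at "tail" is assumed to have
 * been handled already) and advance "tail" past them.
 */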
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct ioat_ring_ent *desc;
        int idx = ioat_chan->tail, i;

        /*
         * We assume that the failed descriptor has been processed.
         * Now we are just returning all the remaining submitted
         * descriptors to abort.
         */
        active = ioat_ring_active(ioat_chan);

        /* we skip the failed descriptor that tail points to */
        for (i = 1; i < active; i++) {
                struct dma_async_tx_descriptor *tx;

                prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
                desc = ioat_get_ring_ent(ioat_chan, idx + i);

                        struct dmaengine_result res;

                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        res.result = DMA_TRANS_ABORTED;
                        dmaengine_desc_get_callback_invoke(tx, &res);
                        tx->callback_result = NULL;

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        WARN_ON(i + 1 >= active);

                /* cleanup super extended descriptors */
                        ioat_free_sed(ioat_dma, desc->sed);

        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat_chan->tail = idx + active;

        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
        ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
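
/*
 * Channel error handler: decode which CHANERR bits can be reported through
 * the descriptor's result (P/Q validation, read/write data errors), treat
 * anything left over as fatal, then clear the error registers and restart
 * the channel.
 */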
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
        struct pci_dev *pdev = to_pdev(ioat_chan);
        struct ioat_dma_descriptor *hw;
        struct dma_async_tx_descriptor *tx;
        struct ioat_ring_ent *desc;
        struct dmaengine_result res;

        /* cleanup so tail points to descriptor that caused the error */
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

        dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
                __func__, chanerr, chanerr_int);

        desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);

        dump_desc_dbg(ioat_chan, desc);

        switch (hw->ctl_f.op) {
        case IOAT_OP_XOR_VAL:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;

        case IOAT_OP_PQ_VAL_16S:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;

                if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
                        *desc->result |= SUM_CHECK_Q_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_Q_ERR;

        if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
                if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
                        res.result = DMA_TRANS_READ_FAILED;
                        err_handled |= IOAT_CHANERR_READ_DATA_ERR;
                } else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
                        res.result = DMA_TRANS_WRITE_FAILED;
                        err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;

                res.result = DMA_TRANS_NOERROR;

        /* fault on unhandled error or spurious halt */
        if (chanerr ^ err_handled || chanerr == 0) {
                dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
                        __func__, chanerr, err_handled);
                dev_err(to_dev(ioat_chan), "Errors handled:\n");
                ioat_print_chanerrs(ioat_chan, err_handled);
                dev_err(to_dev(ioat_chan), "Errors not handled:\n");
                ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

        /* cleanup the faulty descriptor since we are continuing */
        dma_cookie_complete(tx);
        dma_descriptor_unmap(tx);
        dmaengine_desc_get_callback_invoke(tx, &res);
        tx->callback_result = NULL;

        /* mark faulting descriptor as complete */
        *ioat_chan->completion = desc->txd.phys;

        spin_lock_bh(&ioat_chan->prep_lock);
        /* we need to abort all descriptors */
        ioat_abort_descs(ioat_chan);
        /* clean up the channel, we could be in a weird state */
        ioat_reset_hw(ioat_chan);

        writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

        ioat_restart_channel(ioat_chan);
        spin_unlock_bh(&ioat_chan->prep_lock);
static void check_active(struct ioatdma_chan *ioat_chan)
{
        if (ioat_ring_active(ioat_chan)) {
                mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

        if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
                mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
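
/*
 * Watchdog timer: if the channel halted, report the errors and recover; if
 * the ring is idle, drop back to the idle timeout; otherwise reap
 * completions and, when no progress has been made since the last
 * acknowledged completion, force a reset and restart of the channel.
 */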
void ioat_timer_event(struct timer_list *t)
{
        struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
        dma_addr_t phys_complete;

        status = ioat_chansts(ioat_chan);

        /* when halted due to errors check for channel
         * programming errors before advancing the completion state
         */
        if (is_ioat_halted(status)) {

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
                        __func__, chanerr);
                dev_err(to_dev(ioat_chan), "Errors:\n");
                ioat_print_chanerrs(ioat_chan, chanerr);

                if (test_bit(IOAT_RUN, &ioat_chan->state)) {
                        spin_lock_bh(&ioat_chan->cleanup_lock);
                        spin_lock_bh(&ioat_chan->prep_lock);
                        set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                        spin_unlock_bh(&ioat_chan->prep_lock);

                        ioat_abort_descs(ioat_chan);
                        dev_warn(to_dev(ioat_chan), "Reset channel...\n");
                        ioat_reset_hw(ioat_chan);
                        dev_warn(to_dev(ioat_chan), "Restart channel...\n");
                        ioat_restart_channel(ioat_chan);

                        spin_lock_bh(&ioat_chan->prep_lock);
                        clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                        spin_unlock_bh(&ioat_chan->prep_lock);
                        spin_unlock_bh(&ioat_chan->cleanup_lock);

        spin_lock_bh(&ioat_chan->cleanup_lock);

        /* handle the no-actives case */
        if (!ioat_ring_active(ioat_chan)) {
                spin_lock_bh(&ioat_chan->prep_lock);
                check_active(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
                spin_unlock_bh(&ioat_chan->cleanup_lock);

        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {

                chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
                        status, chanerr);
                dev_err(to_dev(ioat_chan), "Errors:\n");
                ioat_print_chanerrs(ioat_chan, chanerr);

                dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
                        ioat_ring_active(ioat_chan));

                spin_lock_bh(&ioat_chan->prep_lock);
                set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                spin_unlock_bh(&ioat_chan->prep_lock);

                ioat_abort_descs(ioat_chan);
                dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
                ioat_reset_hw(ioat_chan);
                dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
                ioat_restart_channel(ioat_chan);

                spin_lock_bh(&ioat_chan->prep_lock);
                clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
                spin_unlock_bh(&ioat_chan->prep_lock);
                spin_unlock_bh(&ioat_chan->cleanup_lock);

        set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

        mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
        spin_unlock_bh(&ioat_chan->cleanup_lock);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
               struct dma_tx_state *txstate)
{
        struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

        ret = dma_cookie_status(c, cookie, txstate);
        if (ret == DMA_COMPLETE)

        ioat_cleanup(ioat_chan);

        return dma_cookie_status(c, cookie, txstate);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
        /* throw away whatever the channel was doing and get it
         * initialized, with ioat3 specific workarounds
         */
        struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
        struct pci_dev *pdev = ioat_dma->pdev;

        ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

        if (ioat_dma->version < IOAT_VER_3_3) {
                /* clear any pending errors */
                err = pci_read_config_dword(pdev,
                                            IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
                        dev_err(&pdev->dev,
                                "channel error register unreachable\n");

                pci_write_config_dword(pdev,
                                       IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

                /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
                 * (workaround for spurious config parity error after restart)
                 */
                pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
                if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
                        pci_write_config_dword(pdev,
                                               IOAT_PCI_DMAUNCERRSTS_OFFSET,

        if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
                ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
                ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
                ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
        }

        err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));

        if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
                writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
                writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
                writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
        }

        dev_err(&pdev->dev, "Failed to reset: %d\n", err);